diff --git a/.github/workflows/run-autoscaling-tests.yml b/.github/workflows/run-autoscaling-tests.yml
new file mode 100644
index 000000000..5f4528649
--- /dev/null
+++ b/.github/workflows/run-autoscaling-tests.yml
@@ -0,0 +1,65 @@
+name: Run Autoscaling Tests
+
+on:
+  workflow_dispatch:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  all-integration-tests:
+    name: 'Run Autoscaling tests'
+    runs-on: ubuntu-latest
+    steps:
+      - name: 'Clone repository'
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 50
+      - name: 'Set up JDK 8'
+        uses: actions/setup-java@v3
+        with:
+          distribution: 'corretto'
+          java-version: 8
+      - name: 'Configure AWS credentials'
+        uses: aws-actions/configure-aws-credentials@v1
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-region: ${{ secrets.AWS_DEFAULT_REGION }}
+      - name: 'Set up temp AWS credentials'
+        run: |
+          creds=($(aws sts get-session-token \
+            --duration-seconds 21600 \
+            --query 'Credentials.[AccessKeyId, SecretAccessKey, SessionToken]' \
+            --output text \
+          | xargs));
+          echo "::add-mask::${creds[0]}"
+          echo "::add-mask::${creds[1]}"
+          echo "::add-mask::${creds[2]}"
+          echo "TEMP_AWS_ACCESS_KEY_ID=${creds[0]}" >> $GITHUB_ENV
+          echo "TEMP_AWS_SECRET_ACCESS_KEY=${creds[1]}" >> $GITHUB_ENV
+          echo "TEMP_AWS_SESSION_TOKEN=${creds[2]}" >> $GITHUB_ENV
+      - name: Run integration tests
+        run: |
+          ./gradlew --no-parallel --no-daemon test-autoscaling-only
+        env:
+          AURORA_CLUSTER_DOMAIN: ${{ secrets.DB_CONN_SUFFIX }}
+          AURORA_DB_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+          AWS_ACCESS_KEY_ID: ${{ env.TEMP_AWS_ACCESS_KEY_ID }}
+          AWS_SECRET_ACCESS_KEY: ${{ env.TEMP_AWS_SECRET_ACCESS_KEY }}
+          AWS_SESSION_TOKEN: ${{ env.TEMP_AWS_SESSION_TOKEN }}
+      - name: 'Archive junit results'
+        if: always()
+        uses: actions/upload-artifact@v3
+        with:
+          name: junit-report
+          path: ./wrapper/build/test-results
+          retention-days: 5
+      - name: 'Archive autoscaling report'
+        if: always()
+        uses: actions/upload-artifact@v3
+        with:
+          name: autoscaling-report
+          path: ./wrapper/build/report
+          retention-days: 5
diff --git a/ADOPTERS.md b/ADOPTERS.md
new file mode 100644
index 000000000..5aa69e168
--- /dev/null
+++ b/ADOPTERS.md
@@ -0,0 +1,9 @@
+# Adopters
+
+This list shows adopters of the AWS Advanced JDBC Wrapper. If you're using the AWS Advanced JDBC Wrapper in some way, please add your team and use case to this file.
+
+Tell us more:
+
+* Share a comment, use case, or case study, or simply let us know about your usage: create a PR against this file and add yourself to the list.
+
+## Adopters list (alphabetical)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 586b4cf2b..9824b4fde 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,16 +3,31 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/#semantic-versioning-200).

-## [2.2.x] - ???
+## [?]
+### :crab: Changed
+- Renamed the `messages.properties` file to `aws_advanced_jdbc_wrapper_messages.properties` ([PR #647](https://github.com/awslabs/aws-advanced-jdbc-wrapper/pull/647)).
+
+## [2.2.4] - 2023-08-29
### :magic_wand: Added
-- Host Availability Strategy to help keep host health status up to date ([PR #530](https://github.com/awslabs/aws-advanced-jdbc-wrapper/pull/530)).
+- Host Availability Strategy to help keep host health status up to date ([PR #530](https://github.com/awslabs/aws-advanced-jdbc-wrapper/pull/530)).
+- Implement `setLoginTimeout` from a suggested enhancement ([Discussion #509](https://github.com/awslabs/aws-advanced-jdbc-wrapper/discussions/509)).
+
+### :bug: Fixed
+- Allow connecting with reader cluster endpoints for Aurora PostgreSQL versions 13.9 and greater by changing the `AuroraPgDialect` topology query ([Issue #593](https://github.com/awslabs/aws-advanced-jdbc-wrapper/issues/593)).
+- Race condition issues between `MonitorThreadContainer#getInstance()` and `MonitorThreadContainer#releaseInstance()` ([PR #601](https://github.com/awslabs/aws-advanced-jdbc-wrapper/pull/601)).

### :crab: Changed
- Dynamically sets the default host list provider based on the dialect used. User applications no longer need to manually set the AuroraHostListProvider when connecting to Aurora Postgres or Aurora MySQL databases.
-- Deprecated AuroraHostListConnectionPlugin.
+- Deprecated AuroraHostListConnectionPlugin.
+  - As an enhancement, the wrapper is now able to automatically set the Aurora host list provider for connections to Aurora MySQL and Aurora PostgreSQL databases. Aurora Host List Connection Plugin is deprecated. If you were using the `AuroraHostListConnectionPlugin`, you can simply remove the plugin from the `wrapperPlugins` parameter. However, if you choose to, you can ensure the provider is used by specifying a topology-aware dialect. For more information, see [Database Dialects](docs/using-the-jdbc-driver/DatabaseDialects.md).
+- Propagate `Connection.clearWarnings()` to underlying connections in the Read Write Splitting Plugin so that the connection object does not accumulate warning messages ([Issue #547](https://github.com/awslabs/aws-advanced-jdbc-wrapper/issues/547)).
+- Close underlying connections in the Read Write Splitting Plugin after switching to read-write or read-only depending on whether internal connection pooling is used ([PR #583](https://github.com/awslabs/aws-advanced-jdbc-wrapper/pull/583)).
+- Sort plugins by default to prevent plugin misconfiguration. This can be disabled by setting the property `autoSortWrapperPluginOrder` to false ([PR #542](https://github.com/awslabs/aws-advanced-jdbc-wrapper/pull/542)).
+- Documentation:
+  - Clarified AWS JDBC Driver limitations with Blue/Green deployments. See [Known Limitations](https://github.com/awslabs/aws-advanced-jdbc-wrapper/blob/main/docs/KnownLimitations.md#amazon-rds-bluegreen-deployments).
+  - Updated and reworded main [README.md](https://github.com/awslabs/aws-advanced-jdbc-wrapper/blob/main/README.md) page.
## [2.2.3] - 2023-07-28 ### :magic_wand: Added @@ -170,6 +185,7 @@ The Amazon Web Services (AWS) Advanced JDBC Driver allows an application to take * The [AWS IAM Authentication Connection Plugin](./docs/using-the-jdbc-driver/using-plugins/UsingTheIamAuthenticationPlugin.md) * The [AWS Secrets Manager Connection Plugin](./docs/using-the-jdbc-driver/using-plugins/UsingTheAwsSecretsManagerPlugin.md) +[2.2.4]: https://github.com/awslabs/aws-advanced-jdbc-wrapper/compare/2.2.3...2.2.4 [2.2.3]: https://github.com/awslabs/aws-advanced-jdbc-wrapper/compare/2.2.2...2.2.3 [2.2.2]: https://github.com/awslabs/aws-advanced-jdbc-wrapper/compare/2.2.1...2.2.2 [2.2.1]: https://github.com/awslabs/aws-advanced-jdbc-wrapper/compare/2.2.0...2.2.1 diff --git a/Maintenance.md b/Maintenance.md index 3230ce23a..2cc31612c 100644 --- a/Maintenance.md +++ b/Maintenance.md @@ -11,7 +11,8 @@ | June 14, 2023 | [Release 2.2.0](https://github.com/awslabs/`aws-advanced-jdbc-wrapper`/releases/tag/2.2.0) | | June 16, 2023 | [Release 2.2.1](https://github.com/awslabs/`aws-advanced-jdbc-wrapper`/releases/tag/2.2.1) | | July 5, 2023 | [Release 2.2.2](https://github.com/awslabs/`aws-advanced-jdbc-wrapper`/releases/tag/2.2.2) | -| July 28, 2023 | [Release 2.2.3](https://github.com/awslabs/`aws-advanced-jdbc-wrapper`/releases/tag/2.2.3) | +| July 31, 2023 | [Release 2.2.3](https://github.com/awslabs/`aws-advanced-jdbc-wrapper`/releases/tag/2.2.3) | +| August 25, 2023 | [Release 2.2.4](https://github.com/awslabs/`aws-advanced-jdbc-wrapper`/releases/tag/2.2.4) | ``aws-advanced-jdbc-wrapper`` [follows semver](https://semver.org/#semantic-versioning-200) which means we will only release breaking changes in major versions. Generally speaking patches will be released to fix existing problems without @@ -66,4 +67,4 @@ from the updated source after the PRs are merged. | Major Version | Latest Minor Version | Status | Initial Release | Maintenance Window Start | Maintenance Window End | |---------------|----------------------|-------------|-----------------|--------------------------|------------------------| | 1 | 1.0.2 | Maintenance | Oct 5, 2022 | Apr 28, 2023 | Apr 28, 2024 | -| 2 | 2.2.2 | Current | Apr 28, 2023 | N/A | N/A | +| 2 | 2.2.4 | Current | Apr 28, 2023 | N/A | N/A | diff --git a/README.md b/README.md index a28f24b9f..3e07f0d75 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ The AWS JDBC Driver is targeted to work with **any** existing JDBC driver. Curre In conjunction with the JDBC Drivers for PostgreSQL, MySQL, and MariaDB, the AWS JDBC Driver enables functionalities from Amazon Aurora such as fast failover for PostgreSQL and MySQL Aurora clusters. It also introduces integration with AWS authentication services such as [AWS Identity and Access Management (IAM)](https://aws.amazon.com/iam/) and [AWS Secrets Manager](https://aws.amazon.com/secrets-manager/). ## About the Wrapper -Hosting a database cluster in the cloud via Aurora is able to provide users with sets of features and configurations to obtain maximum performance and availabilty, such as database failover. However, at the moment, most existing drivers do not currently support those functionalites or are not able to entirely take advantage of it. +Hosting a database cluster in the cloud via Aurora is able to provide users with sets of features and configurations to obtain maximum performance and availability, such as database failover. 
However, most existing drivers do not currently support those functionalities or are not able to take full advantage of them.

The main idea behind the AWS JDBC Driver is to add a software layer on top of an existing JDBC driver that would enable all the enhancements brought by Aurora, without requiring users to change their workflow with their databases and existing JDBC drivers.

@@ -22,7 +22,7 @@ The main idea behind the AWS JDBC Driver is to add a software layer on top of an
### What is Failover?
In an Amazon Aurora database cluster, **failover** is a mechanism by which Aurora automatically repairs the cluster status when a primary DB instance becomes unavailable. It achieves this goal by electing an Aurora Replica to become the new primary DB instance, so that the DB cluster can provide maximum availability to a primary read-write DB instance. The AWS JDBC Driver is designed to understand the situation and coordinate with the cluster in order to provide minimal downtime and allow connections to be very quickly restored in the event of a DB instance failure.

### Benefits of the AWS JDBC Driver
-This is partially due to the time required for the DNS of the new primary DB instance to be fully resolved in order to properly direct the connection. The AWS JDBC Driver allows customers to continue using their existing community drivers in addition to having the AWS JDBC Driver fully exploit failover behavior by maintaining a cache of the Aurora cluster topology and each DB instance's role (Aurora Replica or primary DB instance). This topology is provided via a direct query to the Aurora DB, essentially providing a shortcut to bypass the delays caused by DNS resolution. With this knowledge, the AWS JDBC Driver can more closely monitor the Aurora DB cluster status so that a connection to the new primary DB instance can be established as fast as possible.
+Although Aurora is able to provide maximum availability through the use of failover, existing client drivers do not currently support this functionality. This is partially due to the time required for the DNS of the new primary DB instance to be fully resolved in order to properly direct the connection. The AWS JDBC Driver allows customers to continue using their existing community drivers in addition to having the AWS JDBC Driver fully exploit failover behavior by maintaining a cache of the Aurora cluster topology and each DB instance's role (Aurora Replica or primary DB instance). This topology is provided via a direct query to the Aurora DB, essentially providing a shortcut to bypass the delays caused by DNS resolution. With this knowledge, the AWS JDBC Driver can more closely monitor the Aurora DB cluster status so that a connection to the new primary DB instance can be established as fast as possible.

### Enhanced Failure Monitoring
Since a database failover is usually identified by reaching a network or a connection timeout, the AWS JDBC Driver introduces an enhanced and customizable manner to faster identify a database outage.

@@ -38,13 +38,14 @@ Please visit [this page](./docs/using-the-jdbc-driver/UsingTheJdbcDriver.md#usin

For more information on how to download the AWS JDBC Driver, minimum requirements to use it, and how to integrate it within your project and with your JDBC driver of choice, please visit the [Getting Started page](./docs/GettingStarted.md).

+
### Maven Central

You can find our driver by searching in The Central Repository with GroupId and ArtifactId [software.amazon:aws-advanced-jdbc-wrapper][mvn-search].
[![Maven Central](https://maven-badges.herokuapp.com/maven-central/software.amazon.jdbc/aws-advanced-jdbc-wrapper/badge.svg)](https://maven-badges.herokuapp.com/maven-central/software.amazon.jdbc/aws-advanced-jdbc-wrapper)

```xml
<dependency>
    <groupId>software.amazon.jdbc</groupId>
    <artifactId>aws-advanced-jdbc-wrapper</artifactId>
-    <version>2.2.2</version>
+    <version>2.2.4</version>
</dependency>
```

@@ -85,8 +86,6 @@ You can find our driver by searching in The Central Repository with GroupId and
| `wrapperProfileName` | `PropertyDefinition.PROFILE_NAME` | [ConfigurationProfiles](./docs/using-the-jdbc-driver/UsingTheJdbcDriver.md#configuration-profiles) |

**A Secret ARN** has the following format: `arn:aws:secretsmanager:<Region>:<AccountId>:secret:SecretName-6RandomCharacters`

-## Using the AWS JDBC Driver
-To find all the documentation and concrete examples on how to use the AWS JDBC Driver, please refer to the [AWS JDBC Driver Documentation](./docs/Documentation.md) page.

## Logging
Enabling logging is a very useful mechanism for troubleshooting any issue one might potentially experience while using the AWS JDBC Driver.

@@ -96,10 +95,15 @@ In order to learn how to enable and configure logging, check out the [Logging](.

## Documentation
Technical documentation regarding the functionality of the AWS JDBC Driver will be maintained in this GitHub repository. Since the AWS JDBC Driver requires an underlying JDBC driver, please refer to the individual driver's documentation for driver-specific information.

+### Using the AWS JDBC Driver
+To find all the documentation and concrete examples on how to use the AWS JDBC Driver, please refer to the [AWS JDBC Driver Documentation](./docs/Documentation.md) page.
+
## Examples
| Description | Examples |
|-------------|:--------:|
+| Using the AWS JDBC Driver to get a simple connection | [PostgreSQL](examples/AWSDriverExample/src/main/java/software/amazon/PgConnectionSample.java) |
+| Using the AWS JDBC Driver with failover handling | [PostgreSQL](examples/AWSDriverExample/src/main/java/software/amazon/PgFailoverSample.java) |
| Using the AWS IAM Authentication Plugin with `DriverManager` | [PostgreSQL](examples/AWSDriverExample/src/main/java/software/amazon/AwsIamAuthenticationPostgresqlExample.java)
[MySQL](examples/AWSDriverExample/src/main/java/software/amazon/AwsIamAuthenticationMysqlExample.java)
[MariaDB](examples/AWSDriverExample/src/main/java/software/amazon/AwsIamAuthenticationMariadbExample.java) | | Using the AWS Secrets Manager Plugin with `DriverManager` | [PostgreSQL](examples/AWSDriverExample/src/main/java/software/amazon/AwsSecretsManagerConnectionPluginPostgresqlExample.java)
[MySQL](examples/AWSDriverExample/src/main/java/software/amazon/AwsSecretsManagerConnectionPluginMySQLExample.java) | | Using the AWS Credentials Manager to configure an alternative AWS credentials provider. | [PostgreSQL and MySQL](examples/AWSDriverExample/src/main/java/software/amazon/AwsCredentialsManagerExample.java) | @@ -110,6 +114,7 @@ Technical documentation regarding the functionality of the AWS JDBC Driver will | Using HikariCP with the `AWSWrapperDatasource` | [PostgreSQL](examples/HikariExample/src/main/java/software/amazon/HikariExample.java) | | Using HikariCP with the `AWSWrapperDatasource` with failover handling | [PostgreSQL](examples/HikariExample/src/main/java/software/amazon/HikariFailoverExample.java) | | Using Spring and HikariCP with the AWS JDBC Driver | [PostgreSQL](examples/SpringBootHikariExample/README.md) | +| Using Spring and HikariCP with the AWS JDBC Driver and failover handling | [PostgreSQL](examples/SpringTxFailoverExample/README.md) | | Using Spring and Hibernate with the AWS JDBC Driver | [PostgreSQL](examples/SpringHibernateExample/README.md) | | Using Spring and Wildfly with the AWS JDBC Driver | [PostgreSQL](examples/SpringWildflyExample/README.md) | | Using Vert.x and c3p0 with the AWS JDBC Driver | [PostgreSQL](examples/VertxExample/README.md) | diff --git a/benchmarks/README.md b/benchmarks/README.md index 597fec88b..eca3aa6b3 100644 --- a/benchmarks/README.md +++ b/benchmarks/README.md @@ -7,5 +7,5 @@ The benchmarks do not measure the performance of target JDBC drivers nor the per ## Usage 1. Build the benchmarks with the following command `../gradlew jmhJar`. 1. the JAR file will be outputted to `build/libs` -2. Run the benchmarks with the following command `java -jar build/libs/benchmarks-2.2.2-jmh.jar`. +2. Run the benchmarks with the following command `java -jar build/libs/benchmarks-2.2.4-jmh.jar`. 1. you may have to update the command based on the exact version of the produced JAR file diff --git a/benchmarks/build.gradle.kts b/benchmarks/build.gradle.kts index 8440d8991..f08d42c57 100644 --- a/benchmarks/build.gradle.kts +++ b/benchmarks/build.gradle.kts @@ -20,12 +20,12 @@ plugins { dependencies { jmhImplementation(project(":aws-advanced-jdbc-wrapper")) - implementation("org.postgresql:postgresql:42.5.0") - implementation("mysql:mysql-connector-java:8.0.31") - implementation("org.mariadb.jdbc:mariadb-java-client:3.1.4") + implementation("org.postgresql:postgresql:42.6.0") + implementation("mysql:mysql-connector-java:8.0.33") + implementation("org.mariadb.jdbc:mariadb-java-client:3.2.0") implementation("com.zaxxer:HikariCP:4.0.3") - testImplementation("org.junit.jupiter:junit-jupiter-api:5.9.3") + testImplementation("org.junit.jupiter:junit-jupiter-api:5.10.0") testImplementation("org.mockito:mockito-inline:4.11.0") // 4.11.0 is the last version compatible with Java 8 testRuntimeOnly("org.junit.jupiter:junit-jupiter-engine") } diff --git a/docs/GettingStarted.md b/docs/GettingStarted.md index 2ede9f297..ffb4b5368 100644 --- a/docs/GettingStarted.md +++ b/docs/GettingStarted.md @@ -10,31 +10,33 @@ Before using the AWS Advanced JDBC Driver, you must install: - To use the wrapper with Aurora with PostgreSQL compatibility, install the [PostgreSQL JDBC Driver](https://github.com/pgjdbc/pgjdbc). - To use the wrapper with Aurora with MySQL compatibility, install the [MySQL JDBC Driver](https://github.com/mysql/mysql-connector-j) or [MariaDB JDBC Driver](https://github.com/mariadb-corporation/mariadb-connector-j). 
-If you are using the AWS JDBC Driver as part of a Gradle project, include the wrapper and underlying driver as dependencies. For example, to include the AWS Advanced JDBC Driver and the PostgreSQL JDBC Driver as dependencies in a Gradle project, update the ```build.gradle``` file as follows: +If you are using the AWS JDBC Driver as part of a Gradle project, include the wrapper and underlying driver as dependencies. For example, to include the AWS JDBC Driver and the PostgreSQL JDBC Driver as dependencies in a Gradle project, update the ```build.gradle``` file as follows: + +> **Note:** Depending on which features of the AWS JDBC Driver you use, you may have additional package requirements. Please refer to this [table](https://github.com/awslabs/aws-advanced-jdbc-wrapper/blob/main/docs/using-the-jdbc-driver/UsingTheJdbcDriver.md#list-of-available-plugins) for more information. ```gradle dependencies { - implementation group: 'software.amazon.jdbc', name: 'aws-advanced-jdbc-wrapper', version: '2.2.2' + implementation group: 'software.amazon.jdbc', name: 'aws-advanced-jdbc-wrapper', version: '2.2.4' implementation group: 'org.postgresql', name: 'postgresql', version: '42.5.0' } ``` -## Obtaining the AWS Advanced JDBC Driver +## Obtaining the AWS JDBC Driver ### Direct Download and Installation -You can use pre-compiled packages that can be downloaded directly from [GitHub Releases](https://github.com/awslabs/aws-advanced-jdbc-wrapper/releases) or [Maven Central](https://search.maven.org/search?q=g:software.amazon.jdbc) to install the AWS Advanced JDBC Driver. After downloading the AWS JDBC Driver, install it by including the .jar file in the application's CLASSPATH. +You can use pre-compiled packages that can be downloaded directly from [GitHub Releases](https://github.com/awslabs/aws-advanced-jdbc-wrapper/releases) or [Maven Central](https://search.maven.org/search?q=g:software.amazon.jdbc) to install the AWS JDBC Driver. After downloading the AWS JDBC Driver, install it by including the .jar file in the application's CLASSPATH. 
For example, the following command uses wget to download the wrapper:

```bash
-wget https://github.com/awslabs/aws-advanced-jdbc-wrapper/releases/download/2.2.2/aws-advanced-jdbc-wrapper-2.2.2.jar
+wget https://github.com/awslabs/aws-advanced-jdbc-wrapper/releases/download/2.2.4/aws-advanced-jdbc-wrapper-2.2.4.jar
```

Then, the following command adds the AWS JDBC Driver to the CLASSPATH:

```bash
-export CLASSPATH=$CLASSPATH:/home/userx/libs/aws-advanced-jdbc-wrapper-2.2.2.jar
+export CLASSPATH=$CLASSPATH:/home/userx/libs/aws-advanced-jdbc-wrapper-2.2.4.jar
```

### As a Maven Dependency

@@ -46,7 +48,7 @@ You can use [Maven's dependency management](https://search.maven.org/search?q=g:
```xml
<dependency>
    <groupId>software.amazon.jdbc</groupId>
    <artifactId>aws-advanced-jdbc-wrapper</artifactId>
-    <version>2.2.2</version>
+    <version>2.2.4</version>
</dependency>
```

@@ -57,7 +59,7 @@ You can use [Gradle's dependency management](https://search.maven.org/search?q=g:
```gradle
dependencies {
-    implementation group: 'software.amazon.jdbc', name: 'aws-advanced-jdbc-wrapper', version: '2.2.2'
+    implementation group: 'software.amazon.jdbc', name: 'aws-advanced-jdbc-wrapper', version: '2.2.4'
}
```

@@ -65,10 +67,10 @@ To add a Gradle dependency in a Kotlin syntax, use the following configuration:

```kotlin
dependencies {
-    implementation("software.amazon.jdbc:aws-advanced-jdbc-wrapper:2.2.2")
+    implementation("software.amazon.jdbc:aws-advanced-jdbc-wrapper:2.2.4")
}
```

-## Using the AWS Advanced JDBC Driver
+## Using the AWS JDBC Driver

For more detailed information about how to use and configure the AWS JDBC Driver, please visit [this page](using-the-jdbc-driver/UsingTheJdbcDriver.md).

diff --git a/docs/KnownLimitations.md b/docs/KnownLimitations.md
index 8e7232f30..5bba07052 100644
--- a/docs/KnownLimitations.md
+++ b/docs/KnownLimitations.md
@@ -2,4 +2,4 @@

## Amazon RDS Blue/Green Deployments

-The AWS JDBC Driver currently does not support Amazon RDS Blue/Green Deployments and should be avoided. Executing a Blue/Green deployment with the driver will disconnect the driver from the database, and it will be unable to re-establish a connection to an available database instance.
+This driver currently does not support switchover in Amazon RDS Blue/Green Deployments. To execute a Blue/Green deployment with the driver, please ensure your application is coded to retry the database connection. Retrying will allow the driver to re-establish a connection to an available database instance. Without a retry, the driver would not be able to identify an available database instance after a switchover between the blue and green environments.
\ No newline at end of file
diff --git a/docs/development-guide/DevelopmentGuide.md b/docs/development-guide/DevelopmentGuide.md
index 7998e82de..aeeec268b 100644
--- a/docs/development-guide/DevelopmentGuide.md
+++ b/docs/development-guide/DevelopmentGuide.md
@@ -155,7 +155,7 @@ Windows:

For more information on how to run the integration tests, please visit [Integration Tests](/docs/development-guide/IntegrationTests.md).

#### Sample Code
-[Connection Test Sample Code](/docs/driver-specific/postgresql/ConnectionSample.java)
+[Connection Test Sample Code](./../../examples/AWSDriverExample/src/main/java/software/amazon/PgConnectionSample.java)

## Architecture
For more information on how the AWS Advanced JDBC Driver functions and how it is structured, please visit [Architecture](./Architecture.md).
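The Blue/Green wording above relies on application-side retry of the initial connection. A minimal sketch of such a retry loop follows; the class name, attempt budget, and fixed backoff are illustrative assumptions, not part of the driver's API.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Properties;

public final class BlueGreenRetryExample {

    // Retries DriverManager.getConnection until an instance is reachable again.
    // Assumes maxAttempts >= 1; tune the attempt count and backoff to your switchover window.
    static Connection connectWithRetry(String url, Properties props, int maxAttempts, long backoffMs)
            throws SQLException, InterruptedException {
        SQLException lastFailure = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                return DriverManager.getConnection(url, props);
            } catch (SQLException e) {
                // During a Blue/Green switchover no instance may be resolvable yet; wait and retry.
                lastFailure = e;
                Thread.sleep(backoffMs);
            }
        }
        throw lastFailure;
    }
}
```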
diff --git a/docs/driver-specific/postgresql/ConnectionSample.java b/docs/driver-specific/postgresql/ConnectionSample.java deleted file mode 100644 index adc52ecf8..000000000 --- a/docs/driver-specific/postgresql/ConnectionSample.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.sql.*; - -/** - * Simple Connection Test. - */ -public class ConnectionTestSample { - - private static final String CONNECTION_STRING = "jdbc:aws-wrapper:postgresql://database-pg-name.cluster-XYZ.us-east-2.rds.amazonaws.com:5432/connectionSample"; - private static final String USERNAME = "username"; - private static final String PASSWORD = "password"; - - public static void main(String[] args) throws SQLException { - final Properties properties = new Properties(); - - // Configuring connection properties for the underlying JDBC driver. - properties.setProperty("user", USERNAME); - properties.setProperty("password", PASSWORD); - properties.setProperty("loginTimeout", "100"); - - // Configuring connection properties for the JDBC Wrapper. - properties.setProperty("wrapperPlugins", "failover,efm"); - properties.setProperty("wrapperLogUnclosedConnections", "true"); - - try (Connection conn = DriverManager.getConnection(CONNECTION_STRING, properties); - Statement stmt = conn.createStatement(); - ResultSet rs = stmt.executeQuery("SELECT 1")) { - rs.next(); - } - } -} diff --git a/docs/using-the-jdbc-driver/DataSource.md b/docs/using-the-jdbc-driver/DataSource.md index 4d0f8237d..9367c1f0c 100644 --- a/docs/using-the-jdbc-driver/DataSource.md +++ b/docs/using-the-jdbc-driver/DataSource.md @@ -5,9 +5,10 @@ You can use the `DriverManager` class or a datasource to establish a new connect To establish a connection with the AwsWrapperDataSource, you must: -1. Configure the property names for the underlying driver-specific datasource. -2. Target a driver-specific datasource. -3. Configure the driver-specific datasource. +1. Select a driver-specific datasource depending on what underlying driver is being used (for example: `org.postgresql.ds.PGSimpleDataSource` or `com.mysql.cj.jdbc.MysqlDataSource`). +2. Set up basic connection information in the AwsWrapperDataSource. See [this table](#configurable-datasource-properties) for the available options. +3. Configure any needed driver-specific datasource in the AwsWrapperDataSource using the [target dataSource properties](#configurable-datasource-properties). +4. Configure any needed AWS JDBC Driver properties in the AwsWrapperDataSource using the [target dataSource properties](#configurable-datasource-properties). ### Configurable DataSource Properties @@ -15,14 +16,15 @@ See the table below for a list of configurable properties. > **:warning: Note:** If the same connection property is provided both explicitly in the connection URL and in the datasource properties, the value set in the datasource properties will take precedence. 
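The four steps above can be sketched compactly as follows, assuming the PostgreSQL simple datasource; the host, database, and credentials are placeholders, the `software.amazon.jdbc.ds` package path follows the wrapper's examples, and the setters used are the configuration methods listed in the property table below.

```java
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Properties;
import software.amazon.jdbc.ds.AwsWrapperDataSource;

public class DataSourceSketch {
    public static void main(String[] args) throws SQLException {
        AwsWrapperDataSource ds = new AwsWrapperDataSource();

        // Steps 1 and 2: target a driver-specific datasource and set basic connection information.
        ds.setJdbcProtocol("jdbc:postgresql:");
        ds.setTargetDataSourceClassName("org.postgresql.ds.PGSimpleDataSource");
        ds.setServerName("db-server.mydomain.com");
        ds.setDatabase("testDatabase");

        // Steps 3 and 4: driver-specific and AWS JDBC Driver properties both go into
        // the target datasource properties.
        Properties targetProps = new Properties();
        targetProps.setProperty("socketTimeout", "10");            // underlying PostgreSQL driver property
        targetProps.setProperty("wrapperPlugins", "failover,efm"); // AWS JDBC Driver property
        ds.setTargetDataSourceProperties(targetProps);

        try (Connection conn = ds.getConnection("user", "password")) {
            // use the connection
        }
    }
}
```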
-| Property | Configuration Method | Description | Type | Required | Example | -|-----------------------------|--------------------------------|---------------------------------------------------------------------------------------------------|----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------| -| Server name | `setServerName` | The name of the server. | `String` | Yes, if no URL is provided. | `db-server.mydomain.com` | -| Server port | `setServerPort` | The server port. | `String` | No | `5432` | -| Database name | `setDatabase` | The name of the database. | `String` | No | `testDatabase` | -| JDBC URL | `setJdbcUrl` | The URL to connect with. | `String` | No. Either URL or server name should be set. If both URL and server name have been set, URL will take precedence. Please note that some drivers, such as MariaDb, require some parameters to be included particularly in the URL. | `jdbc:postgresql://localhost/postgres` | -| JDBC protocol | `setJdbcProtocol` | The JDBC protocol that will be used. | `String` | Yes, if the JDBC URL has not been set. | `jdbc:postgresql:` | -| Underlying DataSource class | `setTargetDataSourceClassName` | The fully qualified class name of the underlying DataSource class the AWS JDBC Driver should use. | `String` | Yes, if the JDBC URL has not been set. | `org.postgresql.ds.PGSimpleDataSource` | +| Property | Configuration Method | Description | Type | Required | Example | +|------------------------------|---------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------| +| Server name | `setServerName` | The name of the server. | `String` | Yes, if no URL is provided. | `db-server.mydomain.com` | +| Server port | `setServerPort` | The server port. | `String` | No | `5432` | +| Database name | `setDatabase` | The name of the database. | `String` | No | `testDatabase` | +| JDBC URL | `setJdbcUrl` | The URL to connect with. | `String` | No. Either URL or server name should be set. If both URL and server name have been set, URL will take precedence. Please note that some drivers, such as MariaDb, require some parameters to be included particularly in the URL. | `jdbc:postgresql://localhost/postgres` | +| JDBC protocol | `setJdbcProtocol` | The JDBC protocol that will be used. | `String` | Yes, if the JDBC URL has not been set. | `jdbc:postgresql:` | +| Underlying DataSource class | `setTargetDataSourceClassName` | The fully qualified class name of the underlying DataSource class the AWS JDBC Driver should use. | `String` | Yes, if the JDBC URL has not been set. | `org.postgresql.ds.PGSimpleDataSource` | +| Target DataSource Properties | `setTargetDataSourceProperties` | Any additional properties that are required. This includes properties specific to the current underlying driver as well as any AWS JDBC Driver properties. 
| `Properties` | No | See this [example](../../examples/AWSDriverExample/src/main/java/software/amazon/DatasourceExample.java). | ## Using the AwsWrapperDataSource with Connection Pooling Frameworks @@ -69,15 +71,17 @@ To use the AWS JDBC Driver with a connection pool, you must: ds.addDataSourceProperty("targetDataSourceClassName", "org.postgresql.ds.PGSimpleDataSource"); ``` -5. Configure the driver-specific datasource, if needed. This step is optional: +5. Configure the driver-specific datasource and any AWS JDBC Driver properties, if needed. This step is optional: ```java Properties targetDataSourceProps = new Properties(); targetDataSourceProps.setProperty("socketTimeout", "10"); + targetDataSourceProps.setProperty("wrapperLoggerLevel", "ALL"); ds.addDataSourceProperty("targetDataSourceProperties", targetDataSourceProps); ``` +> **:warning:Note:** HikariCP supports either DataSource-based configuration or DriverManager-based configuration by specifying the `dataSourceClassName` or the `jdbcUrl`. When using the `AwsWrapperDataSource` you must specify the `dataSourceClassName`, therefore `HikariDataSource.setJdbcUrl` is not supported. For more information see HikariCP's [documentation](https://github.com/brettwooldridge/HikariCP#gear-configuration-knobs-baby). + +### Examples See [here](../../examples/AWSDriverExample/src/main/java/software/amazon/DatasourceExample.java) for a simple AWS Driver Datasource example. See [here](../../examples/HikariExample/src/main/java/software/amazon/HikariExample.java) for a complete Hikari example. - -> **:warning:Note:** HikariCP supports either DataSource-based configuration or DriverManager-based configuration by specifying the `dataSourceClassName` or the `jdbcUrl`. When using the `AwsWrapperDataSource` you must specify the `dataSourceClassName`, therefore `HikariDataSource.setJdbcUrl` is not supported. For more information see HikariCP's [documentation](https://github.com/brettwooldridge/HikariCP#gear-configuration-knobs-baby). diff --git a/docs/using-the-jdbc-driver/UsingTheJdbcDriver.md b/docs/using-the-jdbc-driver/UsingTheJdbcDriver.md index 6f7585836..b0473ce2c 100644 --- a/docs/using-the-jdbc-driver/UsingTheJdbcDriver.md +++ b/docs/using-the-jdbc-driver/UsingTheJdbcDriver.md @@ -11,8 +11,7 @@ The AWS JDBC Driver uses the protocol prefix `jdbc:aws-wrapper:`. Internally, th ## Getting a Connection To get a connection from the AWS JDBC Driver, the user application can either connect with a DriverManager or with a DataSource. -The process of getting a connection with a DriverManager will remain the same as with other JDBC Drivers; -[this example](../driver-specific/postgresql/ConnectionSample.java) demonstrates establishing a connection with the PostgreSQL JDBC driver. +The process of getting a connection with a DriverManager will remain the same as with other JDBC Drivers; [this example](./../../examples/AWSDriverExample/src/main/java/software/amazon/PgConnectionSample.java) demonstrates establishing a connection with the PostgreSQL JDBC driver. Note that when connection properties are configured in both the connection string and with a Properties object, the connection string values will take precedence. Establishing a connection with a DataSource may require some additional steps. For detailed information and examples, review the [Datasource](./DataSource.md) documentation. 
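For reference, the `DriverManager` path can be sketched as below, patterned on the relocated PgConnectionSample referenced above; the cluster endpoint, database name, and credentials are placeholders.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Properties;

public class WrapperDriverManagerSketch {
    public static void main(String[] args) throws SQLException {
        final Properties props = new Properties();
        // Connection properties for the underlying JDBC driver.
        props.setProperty("user", "username");
        props.setProperty("password", "password");
        // Connection properties for the AWS JDBC Driver itself.
        props.setProperty("wrapperPlugins", "failover,efm");

        // The jdbc:aws-wrapper: protocol prefix routes the connection through the wrapper.
        try (Connection conn = DriverManager.getConnection(
                "jdbc:aws-wrapper:postgresql://database.cluster-xyz.us-east-2.rds.amazonaws.com:5432/mydb",
                props);
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT 1")) {
            rs.next();
        }
    }
}
```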
@@ -104,20 +103,20 @@ DriverConfigurationProfiles.addOrReplaceProfile( ### List of Available Plugins The AWS JDBC Driver has several built-in plugins that are available to use. Please visit the individual plugin page for more details. -| Plugin name | Plugin Code | Database Compatibility | Description | -|------------------------------------------------------------------------------------------------|---------------------------|------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [Failover Connection Plugin](./using-plugins/UsingTheFailoverPlugin.md) | `failover` | Aurora | Enables the failover functionality supported by Amazon Aurora clusters. Prevents opening a wrong connection to an old writer node dues to stale DNS after failover event. This plugin is enabled by default. | -| [Host Monitoring Connection Plugin](./using-plugins/UsingTheHostMonitoringPlugin.md) | `efm` | Aurora | Enables enhanced host connection failure monitoring, allowing faster failure detection rates. This plugin is enabled by default. | -| Data Cache Connection Plugin | `dataCache` | Any database | Caches results from SQL queries matching the regular expression specified in the `dataCacheTriggerCondition` configuration parameter. | -| Execution Time Connection Plugin | `executionTime` | Any database | Logs the time taken to execute any JDBC method. | -| Log Query Connection Plugin | `logQuery` | Any database | Tracks and logs the SQL statements to be executed. Sometimes SQL statements are not passed directly to the JDBC method as a parameter, such as [executeBatch()](https://docs.oracle.com/javase/8/docs/api/java/sql/Statement.html#executeBatch--). Users can set `enhancedLogQueryEnabled` to `true`, allowing the JDBC Wrapper to obtain SQL statements via Java Reflection.

:warning:**Note:** Enabling Java Reflection may cause a performance degradation. | -| [IAM Authentication Connection Plugin](./using-plugins/UsingTheIamAuthenticationPlugin.md) | `iam` | Any database | Enables users to connect to their Amazon Aurora clusters using AWS Identity and Access Management (IAM). | -| [AWS Secrets Manager Connection Plugin](./using-plugins/UsingTheAwsSecretsManagerPlugin.md) | `awsSecretsManager` | Any database | Enables fetching database credentials from the AWS Secrets Manager service. | -| Aurora Stale DNS Plugin | `auroraStaleDns` | Aurora | Prevents incorrectly opening a new connection to an old writer node when DNS records have not yet updated after a recent failover event.

:warning:**Note:** Contrary to `failover` plugin, `auroraStaleDns` plugin doesn't implement failover support itself. It helps to eliminate opening wrong connections to an old writer node after cluster failover is completed.

:warning:**Note:** This logic is already included in `failover` plugin so you can omit using both plugins at the same time. |
-| [Aurora Connection Tracker Plugin](./using-plugins/UsingTheAuroraConnectionTrackerPlugin.md) | `auroraConnectionTracker` | Aurora | Tracks all the opened connections. In the event of a cluster failover, the plugin will close all the impacted connections to the node. This plugin is enabled by default. |
-| [Driver Metadata Connection Plugin](./using-plugins/UsingTheDriverMetadataConnectionPlugin.md) | `driverMetaData` | Any database | Allows user application to override the return value of `DatabaseMetaData#getDriverName` |
-| [Read Write Splitting Plugin](./using-plugins/UsingTheReadWriteSplittingPlugin.md) | `readWriteSplitting` | Aurora | Enables read write splitting functionality where users can switch between database reader and writer instances. |
-| [Developer Plugin](./using-plugins/UsingTheDeveloperPlugin.md) | `dev` | Any database | Helps developers test various everyday scenarios including rare events like network outages and database cluster failover. The plugin allows injecting and raising an expected exception, then verifying how applications handle it. |
+| Plugin name | Plugin Code | Database Compatibility | Description | Additional Required Dependencies |
+|-------------|-------------|------------------------|-------------|----------------------------------|
+| [Failover Connection Plugin](./using-plugins/UsingTheFailoverPlugin.md) | `failover` | Aurora | Enables the failover functionality supported by Amazon Aurora clusters. Prevents opening a wrong connection to an old writer node due to stale DNS after a failover event. This plugin is enabled by default. | None |
+| [Host Monitoring Connection Plugin](./using-plugins/UsingTheHostMonitoringPlugin.md) | `efm` | Aurora | Enables enhanced host connection failure monitoring, allowing faster failure detection rates. This plugin is enabled by default. | None |
+| Data Cache Connection Plugin | `dataCache` | Any database | Caches results from SQL queries matching the regular expression specified in the `dataCacheTriggerCondition` configuration parameter. | None |
+| Execution Time Connection Plugin | `executionTime` | Any database | Logs the time taken to execute any JDBC method. | None |
+| Log Query Connection Plugin | `logQuery` | Any database | Tracks and logs the SQL statements to be executed. Sometimes SQL statements are not passed directly to the JDBC method as a parameter, such as [executeBatch()](https://docs.oracle.com/javase/8/docs/api/java/sql/Statement.html#executeBatch--). Users can set `enhancedLogQueryEnabled` to `true`, allowing the JDBC Wrapper to obtain SQL statements via Java Reflection.

:warning:**Note:** Enabling Java Reflection may cause a performance degradation. | None | +| [IAM Authentication Connection Plugin](./using-plugins/UsingTheIamAuthenticationPlugin.md) | `iam` | Any database | Enables users to connect to their Amazon Aurora clusters using AWS Identity and Access Management (IAM). | [AWS Java SDK RDS v2.x](https://central.sonatype.com/artifact/software.amazon.awssdk/rds) | +| [AWS Secrets Manager Connection Plugin](./using-plugins/UsingTheAwsSecretsManagerPlugin.md) | `awsSecretsManager` | Any database | Enables fetching database credentials from the AWS Secrets Manager service. | [Jackson Databind](https://central.sonatype.com/artifact/com.fasterxml.jackson.core/jackson-databind)
[AWS Secrets Manager](https://central.sonatype.com/artifact/software.amazon.awssdk/secretsmanager) | +| Aurora Stale DNS Plugin | `auroraStaleDns` | Aurora | Prevents incorrectly opening a new connection to an old writer node when DNS records have not yet updated after a recent failover event.

:warning:**Note:** Unlike the `failover` plugin, the `auroraStaleDns` plugin does not implement failover support itself. It helps to eliminate opening wrong connections to an old writer node after cluster failover is completed.

:warning:**Note:** This logic is already included in the `failover` plugin, so there is no need to use both plugins at the same time. | None |
+| [Aurora Connection Tracker Plugin](./using-plugins/UsingTheAuroraConnectionTrackerPlugin.md) | `auroraConnectionTracker` | Aurora | Tracks all the opened connections. In the event of a cluster failover, the plugin will close all the impacted connections to the node. This plugin is enabled by default. | None |
+| [Driver Metadata Connection Plugin](./using-plugins/UsingTheDriverMetadataConnectionPlugin.md) | `driverMetaData` | Any database | Allows a user application to override the return value of `DatabaseMetaData#getDriverName`. | None |
+| [Read Write Splitting Plugin](./using-plugins/UsingTheReadWriteSplittingPlugin.md) | `readWriteSplitting` | Aurora | Enables read-write splitting functionality where users can switch between database reader and writer instances. | None |
+| [Developer Plugin](./using-plugins/UsingTheDeveloperPlugin.md) | `dev` | Any database | Helps developers test various everyday scenarios including rare events like network outages and database cluster failover. The plugin allows injecting and raising an expected exception, then verifying how applications handle it. | None |

:exclamation: **NOTE**: As an enhancement, the wrapper is now able to automatically set the Aurora host list provider for connections to Aurora MySQL and Aurora PostgreSQL databases. Aurora Host List Connection Plugin is deprecated. If you were using the Aurora Host List Connection Plugin, you can simply remove the plugin from the `wrapperPlugins` parameter.

@@ -138,7 +137,7 @@ If there is an unreleased feature you would like to try, it may be available in
```xml
<dependency>
    <groupId>software.amazon.jdbc</groupId>
    <artifactId>aws-advanced-jdbc-wrapper</artifactId>
-    <version>2.2.2-SNAPSHOT</version>
+    <version>2.2.4-SNAPSHOT</version>
    <scope>system</scope>
    <systemPath>path-to-snapshot-jar</systemPath>
</dependency>
```

diff --git a/docs/using-the-jdbc-driver/using-plugins/UsingTheFailoverPlugin.md b/docs/using-the-jdbc-driver/using-plugins/UsingTheFailoverPlugin.md
index b325c899a..3ad3e5d5b 100644
--- a/docs/using-the-jdbc-driver/using-plugins/UsingTheFailoverPlugin.md
+++ b/docs/using-the-jdbc-driver/using-plugins/UsingTheFailoverPlugin.md
@@ -30,6 +30,7 @@ In addition to the parameters that you can configure for the underlying driver,
| `failoverReaderConnectTimeoutMs` | Integer | No | Maximum allowed time in milliseconds to attempt to connect to a reader instance during a reader failover process. | `30000` |
| `failoverTimeoutMs` | Integer | No | Maximum allowed time in milliseconds to attempt reconnecting to a new writer or reader instance after a cluster failover is initiated. | `300000` |
| `failoverWriterReconnectIntervalMs` | Integer | No | Interval of time in milliseconds to wait between attempts to reconnect to a failed writer during a writer failover process. | `2000` |
+| `keepSessionStateOnFailover` | Boolean | No | This parameter will allow connections to retain the session state after failover. When `keepSessionStateOnFailover` is set to `false`, connections will need to be reconfigured as seen in the example [here](./../../../examples/AWSDriverExample/src/main/java/software/amazon/PgFailoverSample.java). When this parameter is `true`, the autocommit and readOnly values will be kept. This parameter is only necessary when the session state must be retained and the connection cannot be manually reconfigured by the user.

**Please note:** this parameter will not be able to fully restore the connection session state, as it will only save the autocommit and readOnly values. | `false` | | ~~`enableFailoverStrictReader`~~ | Boolean | No | This parameter is no longer available and, if specified, it will be ignored by the driver. See `failoverMode` (`reader-or-writer` or `strict-reader`) for more details. | | ## Host Pattern @@ -63,10 +64,10 @@ When the AWS JDBC Driver throws a TransactionStateUnknownSQLException, the origi - Repeat that query which was executed when the connection failed and continue work as desired. #### Sample Code -[PostgreSQL Failover Sample Code](../../driver-specific/postgresql/FailoverSample.java) +[PostgreSQL Failover Sample Code](./../../../examples/AWSDriverExample/src/main/java/software/amazon/PgFailoverSample.java) >### :warning: Warnings About Proper Usage of the AWS Advanced JDBC Driver ->1. A common practice when using JDBC drivers is to wrap invocations against a Connection object in a try-catch block, and dispose of the Connection object if an Exception is hit. If this practice is left unaltered, the application will lose the fast-failover functionality offered by the JDBC Driver. When failover occurs, the JDBC Driver internally establishes a ready-to-use connection inside the original Connection object before throwing an exception to the user. If this Connection object is disposed of, the newly established connection will be thrown away. The correct practice is to check the SQL error code of the exception and reuse the Connection object if the error code indicates successful failover. The [PostgreSQL Failover Sample Code](../../driver-specific/postgresql/FailoverSample.java) demonstrates this practice. See the section about [Failover Exception Codes](#failover-exception-codes) for more details. +>1. A common practice when using JDBC drivers is to wrap invocations against a Connection object in a try-catch block, and dispose of the Connection object if an Exception is hit. If this practice is left unaltered, the application will lose the fast-failover functionality offered by the JDBC Driver. When failover occurs, the JDBC Driver internally establishes a ready-to-use connection inside the original Connection object before throwing an exception to the user. If this Connection object is disposed of, the newly established connection will be thrown away. The correct practice is to check the SQL error code of the exception and reuse the Connection object if the error code indicates successful failover. The [PostgreSQL Failover Sample Code](./../../../examples/AWSDriverExample/src/main/java/software/amazon/PgFailoverSample.java) demonstrates this practice. See the section about [Failover Exception Codes](#failover-exception-codes) for more details.

>2. We highly recommend that you use the cluster and read-only cluster endpoints instead of the direct instance endpoints of your Aurora cluster, unless you are confident in your application's use of instance endpoints. Although the JDBC Driver will correctly failover to the new writer instance when using instance endpoints, use of these endpoints is discouraged because individual instances can spontaneously change reader/writer status when failover occurs. The JDBC Driver will always connect directly to the instance specified if an instance endpoint is provided, so a write-safe connection cannot be assumed if the application uses instance endpoints.

diff --git a/docs/using-the-jdbc-driver/using-plugins/UsingTheIamAuthenticationPlugin.md b/docs/using-the-jdbc-driver/using-plugins/UsingTheIamAuthenticationPlugin.md
index 9babf21f7..3edabab04 100644
--- a/docs/using-the-jdbc-driver/using-plugins/UsingTheIamAuthenticationPlugin.md
+++ b/docs/using-the-jdbc-driver/using-plugins/UsingTheIamAuthenticationPlugin.md
@@ -3,30 +3,33 @@

## What is IAM?
AWS Identity and Access Management (IAM) grants users access control across all Amazon Web Services. IAM supports granular permissions, giving you the ability to grant different permissions to different users. For more information on IAM and its use cases, please refer to the [IAM documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/introduction.html).

-## AWS IAM Database Authentication
-**Note:** To preserve compatibility with customers using the community driver, IAM Authentication requires the [AWS Java SDK RDS v2.x](https://central.sonatype.com/artifact/software.amazon.awssdk/rds) to be included separately in the classpath. The AWS Java SDK RDS is a runtime dependency and must be resolved.
+## Prerequisites
+> :warning: **Note:** To preserve compatibility with customers using the community driver, IAM Authentication requires the [AWS Java SDK RDS v2.x](https://central.sonatype.com/artifact/software.amazon.awssdk/rds) to be included separately in the classpath. The AWS Java SDK RDS is a runtime dependency and must be resolved.
+
+To enable the IAM Authentication Connection Plugin, add the plugin code `iam` to the [`wrapperPlugins`](../UsingTheJdbcDriver.md#connection-plugin-manager-parameters) value, or to the current [driver profile](../UsingTheJdbcDriver.md#connection-plugin-manager-parameters).

-The Advanced JDBC Wrapper supports Amazon AWS Identity and Access Management (IAM) authentication. When using AWS IAM database authentication, the host URL must be a valid Amazon endpoint, and not a custom domain or an IP address.
+## AWS IAM Database Authentication
+The AWS JDBC Driver supports Amazon AWS Identity and Access Management (IAM) authentication. When using AWS IAM database authentication, the host URL must be a valid Amazon endpoint, and not a custom domain or an IP address.
i.e. `db-identifier.cluster-XYZ.us-east-2.rds.amazonaws.com`

IAM database authentication use is limited to certain database engines. For more information on limitations and recommendations, please [review the IAM documentation](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html).

-## How do I use IAM with the AWS Advanced JDBC Driver?
+## How do I use IAM with the AWS JDBC Driver?
1. Enable AWS IAM database authentication on an existing database or create a new database with AWS IAM database authentication on the AWS RDS Console:
   1. If needed, review the documentation about [creating a new database](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_CreateDBInstance.html).
   2. If needed, review the documentation about [modifying an existing database](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html).
2. Set up an [AWS IAM policy](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.IAMPolicy.html) for AWS IAM database authentication.
-3. [Create a database account](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.DBAccounts.html) using AWS IAM database authentication:
+3. [Create a database account](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.DBAccounts.html) using AWS IAM database authentication. This will be the user specified in the connection string or connection properties.
   1. Connect to your database of choice using primary logins.
      1. For a MySQL database, use the following command to create a new user:
`CREATE USER example_user_name IDENTIFIED WITH AWSAuthenticationPlugin AS 'RDS';` 2. For a PostgreSQL database, use the following command to create a new user:
`CREATE USER db_userx; GRANT rds_iam TO db_userx;` +4. Add the plugin code `iam` to the [`wrapperPlugins`](../UsingTheJdbcDriver.md#connection-plugin-manager-parameters) parameter value. | Parameter | Value | Required | Description | Example Value | |-------------------|:-------:|:--------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------| -| `wrapperPlugins` | String | No | Set to `"iam"` to enable AWS IAM database authentication | `iam` | | `iamDefaultPort` | String | No | This property will override the default port that is used to generate the IAM token. The default port is determined based on the underlying driver protocol. For now, there is support for `jdbc:postgresql:` and `jdbc:mysql:`. Target drivers with different protocols will require users to provide a default port. | `1234` | | `iamHost` | String | No | This property will override the default hostname that is used to generate the IAM token. The default hostname is derived from the connection string. This parameter is required when users are connecting with custom endpoints. | `database.cluster-hash.us-east-1.rds.amazonaws.com` | | `iamRegion` | String | No | This property will override the default region that is used to generate the IAM token. The default region is parsed from the connection string. | `us-east-2` | diff --git a/docs/using-the-jdbc-driver/using-plugins/UsingTheReadWriteSplittingPlugin.md b/docs/using-the-jdbc-driver/using-plugins/UsingTheReadWriteSplittingPlugin.md index 9c6740209..7290a1be1 100644 --- a/docs/using-the-jdbc-driver/using-plugins/UsingTheReadWriteSplittingPlugin.md +++ b/docs/using-the-jdbc-driver/using-plugins/UsingTheReadWriteSplittingPlugin.md @@ -11,13 +11,14 @@ final Properties properties = new Properties(); properties.setProperty(PropertyDefinition.PLUGINS.name, "readWriteSplitting,failover,efm"); ``` -If you would like to use the read-write splitting plugin without the failover plugin, the Aurora host list plugin must be included before the read-write splitting plugin. This informs the driver that it should query for Aurora's topology. - +If you would like to use the read-write splitting plugin without the failover plugin, make sure you have the `readWriteSplitting` plugin in the `wrapperPlugins` property, and that the failover plugin is not part of it. ``` final Properties properties = new Properties(); -properties.setProperty(PropertyDefinition.PLUGINS.name, "auroraHostList,readWriteSplitting"); +properties.setProperty(PropertyDefinition.PLUGINS.name, "readWriteSplitting"); ``` +> The Aurora Host List Plugin is deprecated after version 2.2.3. To use the Read Write Splitting plugin without failover with versions 2.2.3 and earlier, add the Aurora Host List Plugin to the plugin list like so: `"auroraHostList,readWriteSplitting"`. + ### Supplying the connection string When using the read-write splitting plugin against Aurora clusters, you do not have to supply multiple instance URLs in the connection string. Instead, supply just the URL for the initial instance to which you're connecting. You must also include either the failover plugin or the Aurora host list plugin in your plugin chain so that the driver knows to query Aurora for its topology. 
See the section on [loading the read-write splitting plugin](#loading-the-read-write-splitting-plugin) for more info. @@ -68,11 +69,7 @@ private static String getPoolKey(HostSpec hostSpec, Properties props) { 2. Call `ConnectionProviderManager.setConnectionProvider`, passing in the `HikariPooledConnectionProvider` you created in step 1. -3. By default, the read-write plugin randomly selects a reader instance the first time that `setReadOnly(true)` is called. If you would like the plugin to select a reader based on the instance with the least connections instead, set the following connection property. Note that this strategy is only available when internal connection pools are enabled - if you set the connection property without enabling internal pools, an exception will be thrown. - -```java -props.setProperty(ReadWriteSplittingPlugin.READER_HOST_SELECTOR_STRATEGY.name, "leastConnections"); -``` +3. By default, the read-write plugin randomly selects a reader instance the first time that `setReadOnly(true)` is called. If you would like the plugin to select a reader based on a different connection strategy, please see the [Connection Strategies](#connection-strategies) section for more information. 4. Continue as normal: create connections and use them as needed. @@ -83,6 +80,23 @@ props.setProperty(ReadWriteSplittingPlugin.READER_HOST_SELECTOR_STRATEGY.name, " ### Example [ReadWriteSplittingPostgresExample.java](../../../examples/AWSDriverExample/src/main/java/software/amazon/ReadWriteSplittingPostgresExample.java) demonstrates how to enable and configure read-write splitting with the AWS Advanced JDBC Driver. +### Connection Strategies +By default, the read-write plugin randomly selects a reader instance the first time that `setReadOnly(true)` is called. To balance connections to reader instances more evenly, different connection strategies can be used. The following table describes the currently available connection strategies and any relevant configuration parameters for each strategy. + +To indicate which connection strategy to use, the `readerHostSelectorStrategy` configuration parameter can be set to one of the connection strategies in the table below. The following is an example of enabling the least connections strategy: + +```java +props.setProperty(ReadWriteSplittingPlugin.READER_HOST_SELECTOR_STRATEGY.name, "leastConnections"); +``` + +| Connection Strategy | Configuration Parameter | Description | Default Value | |---------------------|--------------------------|-------------------|---------------| +| `random` | This strategy does not have configuration parameters. | The random strategy is the default connection strategy. When switching to a reader connection, the reader instance will be chosen randomly from the available database instances. | N/A | +| `leastConnections` | This strategy does not have configuration parameters.
| The least connections strategy will select reader instances based on which database instance has the least number of currently active connections. Note that this strategy is only available when internal connection pools are enabled - if you set the connection property without enabling internal pools, an exception will be thrown. | N/A | +| `roundRobin` | See the following rows for configuration parameters. | The round robin strategy will select a reader instance by taking turns with all available database instances in a cycle. A slight addition to the round robin strategy is the weighted round robin strategy, where more connections will be passed to reader instances based on user-specified connection properties. | N/A | +| | `roundRobinHostWeightPairs` | This parameter value must be a `string` type comma-separated list of database host-weight pairs in the format `<host>:<weight>`. The host represents the database instance name, and the weight represents how many connections should be directed to the host in one cycle through all available hosts. For example, the value `instance-1:1,instance-2:4` means that for every connection to `instance-1`, there will be four connections to `instance-2`.

**Note:** The `<weight>` value in the string must be an integer greater than or equal to 1. | `null` | +| | `roundRobinDefaultWeight` | This parameter value must be an integer value in the form of a `string`. This parameter represents the default weight for any hosts that have not been configured with the `roundRobinHostWeightPairs` parameter. For example, if a connection were already established and host weights were set with `roundRobinHostWeightPairs` but a new reader node was added to the database, the new reader node would use the default weight.

**Note:** This value must be an integer greater than or equal to 1. | `1` | + ### Limitations #### General plugin limitations diff --git a/examples/AWSDriverExample/build.gradle.kts b/examples/AWSDriverExample/build.gradle.kts index a11ac1c31..1478eb7fe 100644 --- a/examples/AWSDriverExample/build.gradle.kts +++ b/examples/AWSDriverExample/build.gradle.kts @@ -16,10 +16,10 @@ dependencies { implementation("org.springframework.boot:spring-boot-starter-jdbc:2.7.13") // 2.7.13 is the last version compatible with Java 8 - implementation("org.postgresql:postgresql:42.5.4") - implementation("mysql:mysql-connector-java:8.0.31") - implementation("software.amazon.awssdk:rds:2.20.49") - implementation("software.amazon.awssdk:secretsmanager:2.20.105") + implementation("org.postgresql:postgresql:42.6.0") + implementation("mysql:mysql-connector-java:8.0.33") + implementation("software.amazon.awssdk:rds:2.20.158") + implementation("software.amazon.awssdk:secretsmanager:2.20.154") implementation("com.fasterxml.jackson.core:jackson-databind:2.15.2") implementation(project(":aws-advanced-jdbc-wrapper")) } diff --git a/examples/AWSDriverExample/src/main/java/software/amazon/DatasourceExample.java b/examples/AWSDriverExample/src/main/java/software/amazon/DatasourceExample.java index e0cbd137a..a884edf4e 100644 --- a/examples/AWSDriverExample/src/main/java/software/amazon/DatasourceExample.java +++ b/examples/AWSDriverExample/src/main/java/software/amazon/DatasourceExample.java @@ -30,17 +30,31 @@ public class DatasourceExample { public static void main(String[] args) throws SQLException { AwsWrapperDataSource ds = new AwsWrapperDataSource(); - // Configure the property names for the underlying driver-specific data source: ds.setJdbcProtocol("jdbc:postgresql:"); // Specify the driver-specific data source: ds.setTargetDataSourceClassName("org.postgresql.ds.PGSimpleDataSource"); - // Configure the driver-specific data source: + // Configure basic data source information: + ds.setServerName("db-identifier.cluster-XYZ.us-east-2.rds.amazonaws.com"); + ds.setDatabase("employees"); + ds.setServerPort("5432"); + + // Configure the driver-specific and AWS JDBC Driver properties (optional): Properties targetDataSourceProps = new Properties(); - targetDataSourceProps.setProperty("serverName", "db-identifier.cluster-XYZ.us-east-2.rds.amazonaws.com"); - targetDataSourceProps.setProperty("database", "employees"); - targetDataSourceProps.setProperty("serverPort", "5432"); + + // Alternatively, instead of using the methods above to configure the basic data source information, + // those properties can be set using the target data source properties: + // targetDataSourceProps.setProperty("serverName", "db-identifier.cluster-XYZ.us-east-2.rds.amazonaws.com"); + // targetDataSourceProps.setProperty("database", "employees"); + // targetDataSourceProps.setProperty("serverPort", "5432"); + + // Configure any driver-specific properties: + targetDataSourceProps.setProperty("ssl", "true"); + + // Configure any AWS JDBC Driver properties: + targetDataSourceProps.setProperty("wrapperLoggerLevel", "ALL"); + ds.setTargetDataSourceProperties(targetDataSourceProps); // Try and make a connection: diff --git a/examples/AWSDriverExample/src/main/java/software/amazon/PgConnectionSample.java b/examples/AWSDriverExample/src/main/java/software/amazon/PgConnectionSample.java new file mode 100644 index 000000000..d0642ba59 --- /dev/null +++ b/examples/AWSDriverExample/src/main/java/software/amazon/PgConnectionSample.java @@ -0,0 +1,61 @@ +/* + * 
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Properties; + +/** + * Simple Connection Test. + */ +public class PgConnectionSample { + + private static final String CONNECTION_STRING = "jdbc:aws-wrapper:postgresql://database-pg-name.cluster-XYZ.us-east-2.rds.amazonaws.com:5432/connectionSample"; + // Alternatively, all connection properties can be set in the connection string: + // private static final String CONNECTION_STRING = + // "jdbc:aws-wrapper:postgresql://database-pg-name.cluster-XYZ.us-east-2.rds.amazonaws.com:5432/connectionSample" + // + "?user=username" + // + "&password=password" + // + "&loginTimeout=100" + // + "&wrapperPlugins=failover,efm" + // + "&wrapperLogUnclosedConnections=true"; + private static final String USERNAME = "username"; + private static final String PASSWORD = "password"; + + public static void main(String[] args) throws SQLException { + final Properties properties = new Properties(); + + // Configuring connection properties for the underlying JDBC driver. + properties.setProperty("user", USERNAME); + properties.setProperty("password", PASSWORD); + properties.setProperty("loginTimeout", "100"); + + // Configuring connection properties for the JDBC Wrapper. + properties.setProperty("wrapperPlugins", "failover,efm"); + properties.setProperty("wrapperLogUnclosedConnections", "true"); + + try (Connection conn = DriverManager.getConnection(CONNECTION_STRING, properties); + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT 1")) { + rs.next(); + } + } +} diff --git a/docs/driver-specific/postgresql/FailoverSample.java b/examples/AWSDriverExample/src/main/java/software/amazon/PgFailoverSample.java similarity index 82% rename from docs/driver-specific/postgresql/FailoverSample.java rename to examples/AWSDriverExample/src/main/java/software/amazon/PgFailoverSample.java index ec66fede8..629c3884c 100644 --- a/docs/driver-specific/postgresql/FailoverSample.java +++ b/examples/AWSDriverExample/src/main/java/software/amazon/PgFailoverSample.java @@ -1,30 +1,32 @@ /* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ -import software.amazon.jdbc.PropertyDefinition; +package software.amazon; + import java.sql.Connection; import java.sql.DriverManager; import java.sql.SQLException; import java.sql.Statement; import java.util.Properties; +import software.amazon.jdbc.PropertyDefinition; import software.amazon.jdbc.plugin.failover.FailoverFailedSQLException; import software.amazon.jdbc.plugin.failover.FailoverSuccessSQLException; import software.amazon.jdbc.plugin.failover.TransactionStateUnknownSQLException; -public class FailoverSample { +public class PgFailoverSample { // User configures connection properties here public static final String POSTGRESQL_CONNECTION_STRING = @@ -41,10 +43,6 @@ public static void main(String[] args) throws SQLException { props.setProperty(PropertyDefinition.USER.name, USERNAME); props.setProperty(PropertyDefinition.PASSWORD.name, PASSWORD); - // AWS Advanced JDBC Wrapper configuration - props.setProperty(PropertyDefinition.TARGET_DRIVER_USER_PROPERTY_NAME.name, "user"); - props.setProperty(PropertyDefinition.TARGET_DRIVER_PASSWORD_PROPERTY_NAME.name, "password"); - // Setup Step: Open connection and create tables - uncomment this section to create table and test values // try (final Connection connection = DriverManager.getConnection(POSTGRESQL_CONNECTION_STRING, props)) { // setInitialSessionSettings(connection); diff --git a/examples/AWSDriverExample/src/main/java/software/amazon/ReadWriteSplittingMySQLExample.java b/examples/AWSDriverExample/src/main/java/software/amazon/ReadWriteSplittingMySQLExample.java index 74eed80f3..85911ddf8 100644 --- a/examples/AWSDriverExample/src/main/java/software/amazon/ReadWriteSplittingMySQLExample.java +++ b/examples/AWSDriverExample/src/main/java/software/amazon/ReadWriteSplittingMySQLExample.java @@ -117,7 +117,7 @@ public static void processResults(ResultSet results) { public static void setInitialSessionSettings(Connection conn) throws SQLException { try (Statement stmt1 = conn.createStatement()) { // User can edit settings - stmt1.executeUpdate("SET TIME ZONE 'UTC'"); + stmt1.executeUpdate("SET time_zone = 'UTC'"); } } diff --git a/examples/HikariExample/build.gradle.kts b/examples/HikariExample/build.gradle.kts index ee0b44068..2f868c29f 100644 --- a/examples/HikariExample/build.gradle.kts +++ b/examples/HikariExample/build.gradle.kts @@ -15,8 +15,8 @@ */ dependencies { - implementation("org.postgresql:postgresql:42.5.4") - implementation("mysql:mysql-connector-java:8.0.32") + implementation("org.postgresql:postgresql:42.6.0") + implementation("mysql:mysql-connector-java:8.0.33") implementation(project(":aws-advanced-jdbc-wrapper")) implementation("com.zaxxer:HikariCP:4.0.3") } diff --git a/examples/SpringBootHikariExample/README.md 
b/examples/SpringBootHikariExample/README.md index bb0ad2dea..39d88531c 100644 --- a/examples/SpringBootHikariExample/README.md +++ b/examples/SpringBootHikariExample/README.md @@ -1,10 +1,10 @@ -# Tutorial: Getting started with Spring Boot, Hikari, and the AWS Advanced JDBC Wrapper Driver +# Tutorial: Getting started with Spring Boot, Hikari, and the AWS JDBC Driver -In this tutorial, you will set up a Spring Boot application using Hikari and the AWS Advanced JDBC Driver. +In this tutorial, you will set up a Spring Boot application using Hikari and the AWS JDBC Driver. > Note: this tutorial was written using the following technologies: > - Spring Boot 2.7.0 -> - AWS Advanced JDBC Wrapper 2.2.2 +> - AWS JDBC Driver 2.2.4 > - Postgresql 42.5.4 > - Java 8 @@ -68,7 +68,7 @@ Please note that the sample code inside the AWS JDBC Driver project will use the ## Step 3: Configure the Datasource -In the `application.yml` file, configure Hikari and AWS Advanced JDBC Wrapper Driver as its driver. +In the `application.yml` file, configure Hikari and the AWS JDBC Driver as its driver. ```yaml spring: @@ -141,4 +141,3 @@ Start the application by running `./gradlew run` in the terminal. Create an HTTP request to the application by running the following terminal command `curl http://localhost:8080/select1`. This will trigger the query statement `SELECT 1;` and return the results. - diff --git a/examples/SpringBootHikariExample/build.gradle.kts b/examples/SpringBootHikariExample/build.gradle.kts index 2acccbe7f..518d6aaf4 100644 --- a/examples/SpringBootHikariExample/build.gradle.kts +++ b/examples/SpringBootHikariExample/build.gradle.kts @@ -16,13 +16,13 @@ plugins { id("org.springframework.boot") version "2.7.0" - id("io.spring.dependency-management") version "1.1.0" + id("io.spring.dependency-management") version "1.1.3" } dependencies { implementation("org.springframework.boot:spring-boot-starter-data-jdbc") implementation("org.springframework.boot:spring-boot-starter-web") - implementation("org.postgresql:postgresql:42.5.4") + implementation("org.postgresql:postgresql:42.6.0") implementation(project(":aws-advanced-jdbc-wrapper")) } diff --git a/examples/SpringHibernateExample/README.md b/examples/SpringHibernateExample/README.md index aa2e1a5a6..e3f530141 100644 --- a/examples/SpringHibernateExample/README.md +++ b/examples/SpringHibernateExample/README.md @@ -5,7 +5,7 @@ In this tutorial, you will set up a Spring Boot and Hibernate application with t > Note: this tutorial was written using the following technologies: > - Spring Boot 2.7.1 > - Hibernate -> - AWS Advanced JDBC Driver 2.2.2 +> - AWS Advanced JDBC Driver 2.2.4 > - Postgresql 42.5.4 > - Gradle 7 > - Java 11 diff --git a/examples/SpringHibernateExample/build.gradle.kts b/examples/SpringHibernateExample/build.gradle.kts index defd0e6c9..f1e0c980e 100644 --- a/examples/SpringHibernateExample/build.gradle.kts +++ b/examples/SpringHibernateExample/build.gradle.kts @@ -16,13 +16,13 @@ plugins { id("org.springframework.boot") version "2.7.0" - id("io.spring.dependency-management") version "1.1.0" + id("io.spring.dependency-management") version "1.1.3" } dependencies { implementation("org.springframework.boot:spring-boot-starter-data-jpa") implementation("org.springframework.boot:spring-boot-starter-web") - implementation("org.postgresql:postgresql:42.5.4") - implementation("software.amazon.awssdk:rds:2.20.49") + implementation("org.postgresql:postgresql:42.6.0") + implementation("software.amazon.awssdk:rds:2.20.158")
implementation(project(":aws-advanced-jdbc-wrapper")) } diff --git a/examples/SpringTxFailoverExample/README.md b/examples/SpringTxFailoverExample/README.md new file mode 100644 index 000000000..02c9328a6 --- /dev/null +++ b/examples/SpringTxFailoverExample/README.md @@ -0,0 +1,285 @@ +# Tutorial: Getting started with Spring Boot and Failover + +In this tutorial, you will set up a Spring Boot application using the AWS JDBC Driver. This sample application will contain an example of how to retry transactions interrupted by failover. This tutorial is an extension of the [Spring Boot HikariCP example](https://github.com/awslabs/aws-advanced-jdbc-wrapper/blob/main/examples/SpringBootHikariExample/README.md) and will contain similar elements. + +> Note: this tutorial was written using the following technologies: +> - Spring Boot 2.7.0 +> - AWS JDBC Driver 2.2.4 +> - Postgresql 42.5.4 +> - Java 8 + +## Step 1: Create a Gradle Project + +Create a Gradle project with the following project hierarchy: +``` +├───src +│ └───main +│ ├───java +│ │ └───example +│ │ ├───ApiController.java +│ │ ├───Example.java +│ │ ├───ExampleConfiguration.java +│ │ ├───ExampleDao.java +│ │ ├───ExampleDaoImpl.java +│ │ ├───ExampleService.java +│ │ └───SpringTxFailoverExampleApplication.java +│ └───resources +│ └───application.yml +└───build.gradle.kts +``` + +When creating the `SpringTxFailoverExampleApplication.java` class, add the following code to it. + +```java +package example; + +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@SpringBootApplication +public class SpringTxFailoverExampleApplication { + public static void main(String[] args) { + SpringApplication.run(SpringTxFailoverExampleApplication.class, args); + } +} +``` + +This tutorial requires an `EXAMPLE` table with two integer fields: `ID` and `STATUS`. The `Example.java` file contains a representation of an "Example" object. It should contain the following code: +```java +package example; + +public class Example { + + private int id; + + private int status; + + public Example() { + super(); + } + + public int getId() { + return id; + } + + public void setId(int id) { + this.id = id; + } + + public int getStatus() { + return status; + } + + public void setStatus(int status) { + this.status = status; + } + + @Override + public String toString() { + return String.format("Example [id=%s, status=%s]", id, status); + } +} +``` + +You may also use the Spring Initializr to create the boilerplate code: +1. Go to https://start.spring.io/ +2. Select a Gradle project and Spring Boot version 2.7.9. +3. Select Java version 8. +4. Click Dependencies and select the following: + - Spring Web + - Spring Data JDBC + - PostgreSQL Driver + +## Step 2: Add the required Gradle Dependencies + +In the `build.gradle.kts` file, add the following dependencies.
+ +```kotlin +dependencies { + implementation("org.springframework.boot:spring-boot-starter-data-jdbc") + implementation("org.springframework.boot:spring-boot-starter-web") + implementation("org.springframework.retry:spring-retry:1.3.4") + implementation("org.springframework:spring-aspects:5.3.29") + implementation("org.postgresql:postgresql:42.5.4") + implementation("software.amazon.awssdk:rds:2.20.49") + implementation("software.amazon.jdbc:aws-advanced-jdbc-wrapper:latest") +} +``` + +Please note that the sample code inside the AWS JDBC Driver project will use the dependency `implementation(project(":aws-advanced-jdbc-wrapper"))` instead of `implementation("software.amazon.jdbc:aws-advanced-jdbc-wrapper:latest")` as seen above. + +## Step 3: Configure the Datasource + +In the `application.yml` file, configure Hikari and the AWS JDBC Driver as its driver. + +Note that in Spring Boot 2 and 3, Hikari is the default DataSource implementation, so a bean explicitly specifying Hikari as the DataSource is not needed. + +```yaml +spring: + datasource: + url: jdbc:aws-wrapper:postgresql://db-identifier.cluster-XYZ.us-east-2.rds.amazonaws.com:5432/db + username: jane_doe + password: password + driver-class-name: software.amazon.jdbc.Driver + hikari: + exception-override-class-name: software.amazon.jdbc.util.HikariCPSQLException + max-lifetime: 1260000 + auto-commit: false + maximum-pool-size: 3 + data-source-properties: + keepSessionStateOnFailover: true +``` + +Please also note the use of the [`keepSessionStateOnFailover`](https://github.com/awslabs/aws-advanced-jdbc-wrapper/blob/main/docs/using-the-jdbc-driver/using-plugins/UsingTheFailoverPlugin.md#failover-parameters) property. When failover occurs, the connection's auto-commit value is reset to true. When the auto-commit value is set to false or transactions are used, further operations such as a rollback or commit on the same connection will cause errors. This parameter is used when connections cannot be reconfigured manually as seen in this [example](https://github.com/awslabs/aws-advanced-jdbc-wrapper/tree/main/examples/AWSDriverExample/src/main/java/software/amazon/PgFailoverSample.java). + +## Step 4: Set up a data access object + +Set up a simple data access object (DAO) interface and implementation. The data access object will be responsible for executing any queries. In this tutorial, only a get method will be included, but other methods are available within the sample code. + +The DAO interface: +```java +package example; + +import java.util.List; +import java.util.Map; + +public interface ExampleDao { + public List<Map<String, Object>> getAll(); +} +``` + +The DAO implementation: +```java +package example; + +import java.util.List; +import java.util.Map; +import javax.sql.DataSource; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.jdbc.core.JdbcTemplate; +import org.springframework.stereotype.Component; + +@Component +public class ExampleDaoImpl implements ExampleDao { + @Autowired + private DataSource dataSource; + + @Override + public List<Map<String, Object>> getAll() { + final String sql = "SELECT * FROM EXAMPLE"; + final JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource); + return jdbcTemplate.queryForList(sql); + } +} +``` + +## Step 5: Set up a configuration class + +The `ExampleConfiguration.java` file will contain a bean for the transaction manager. The autowired datasource will be configured based on the `application.yml` file contents.
+ +```java +package example; + +import javax.sql.DataSource; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.jdbc.datasource.DataSourceTransactionManager; +import org.springframework.retry.annotation.EnableRetry; + +@Configuration +@EnableRetry +public class ExampleConfiguration { + + @Autowired + private DataSource dataSource; + + @Bean + public DataSourceTransactionManager getDataSourceTransactionManager() { + return new DataSourceTransactionManager(dataSource); + } +} +``` + +## Step 6: Set up a service + +Set up a service class, which will contain an autowired `exampleDao`. + +```java +package example; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.retry.support.RetrySynchronizationManager; +import org.springframework.stereotype.Service; +import org.springframework.transaction.annotation.Transactional; + +@Service +@Transactional +public class ExampleService { + private final Logger logger = LoggerFactory.getLogger(this.getClass()); + + @Autowired + private ExampleDao exampleDao; + + public List<Example> get() { + logger.info("Retry Number : {}", RetrySynchronizationManager.getContext().getRetryCount()); + List<Map<String, Object>> rows = exampleDao.getAll(); + List<Example> examples = new ArrayList<>(); + for (Map<String, Object> row : rows) { + Example obj = new Example(); + obj.setId(((Integer) row.get("ID"))); + obj.setStatus((Integer) row.get("STATUS")); + examples.add(obj); + } + return examples; + } +} +``` + +## Step 7: Set up a controller + +Create a new `ApiController` class: + +```java +package example; + +import java.util.List; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.retry.annotation.Backoff; +import org.springframework.retry.annotation.Retryable; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RestController; +import software.amazon.jdbc.plugin.failover.FailoverSQLException; + +@RestController +public class ApiController { + + @Autowired + private ExampleService exampleService; + + @GetMapping(value = "/get") + @Retryable(value = {FailoverSQLException.class}, maxAttempts = 3, backoff = @Backoff(delay = 5000)) + public List<Example> get() { + return exampleService.get(); + } +} +``` + +The `@RestController` annotation on the class will allow methods in it to use annotations for mapping HTTP requests. +In this example, the `get()` method is annotated with `@GetMapping(value = "/get")`, which will route requests with the path `/get` to that method. +Within the `get()` method, the service is called to perform other operations and return its results. + +The `@EnableRetry` and `@Retryable` annotations allow methods to be retried based on the given value. In the sample above, `value = {FailoverSQLException.class}` indicates that the annotated methods will be retried if a `FailoverSQLException` is thrown. + +## Step 8: Run and call the application + +Start the application by running `./gradlew :springtxfailover:bootRun` in the terminal. + +Create an HTTP request to the application by running the following terminal command `curl localhost:8080/get`. +This will trigger the query statement `SELECT * FROM EXAMPLE;` and return the results.
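As an aside, the retry behavior configured above can be pictured as a plain loop. The sketch below is not part of the sample project; `runTransaction()` is a hypothetical stand-in for the transactional service call, and the constants mirror the `@Retryable` settings used in this tutorial:

```java
import java.sql.SQLException;
import software.amazon.jdbc.plugin.failover.FailoverSQLException;

public class RetrySketch {
  // Hypothetical stand-in for the transactional work (e.g. the service call).
  static void runTransaction() throws SQLException {
    // ... execute statements against the wrapped DataSource ...
  }

  // Roughly what @Retryable(value = {FailoverSQLException.class},
  // maxAttempts = 3, backoff = @Backoff(delay = 5000)) does for this example.
  static void getWithRetry() throws SQLException, InterruptedException {
    int attempts = 0;
    while (true) {
      try {
        runTransaction();
        return; // success, no retry needed
      } catch (FailoverSQLException e) {
        if (++attempts >= 3) { // maxAttempts exhausted
          throw e;
        }
        Thread.sleep(5000); // fixed backoff before the next attempt
      }
    }
  }
}
```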
diff --git a/examples/SpringTxFailoverExample/build.gradle.kts b/examples/SpringTxFailoverExample/build.gradle.kts new file mode 100644 index 000000000..385588db4 --- /dev/null +++ b/examples/SpringTxFailoverExample/build.gradle.kts @@ -0,0 +1,29 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +plugins { + id("org.springframework.boot") version "2.7.0" + id("io.spring.dependency-management") version "1.1.3" +} + +dependencies { + implementation("org.springframework.boot:spring-boot-starter-data-jdbc") + implementation("org.springframework.boot:spring-boot-starter-web") + implementation("org.springframework.retry:spring-retry:1.3.4") + implementation("org.springframework:spring-aspects:5.3.29") + implementation("org.postgresql:postgresql") + implementation(project(":aws-advanced-jdbc-wrapper")) +} diff --git a/examples/SpringTxFailoverExample/gradle.properties b/examples/SpringTxFailoverExample/gradle.properties new file mode 100644 index 000000000..dc802102f --- /dev/null +++ b/examples/SpringTxFailoverExample/gradle.properties @@ -0,0 +1,16 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Do not publish the Jar file for this subproject +nexus.publish=false diff --git a/examples/SpringTxFailoverExample/src/main/java/example/ApiController.java b/examples/SpringTxFailoverExample/src/main/java/example/ApiController.java new file mode 100644 index 000000000..94a1ae39f --- /dev/null +++ b/examples/SpringTxFailoverExample/src/main/java/example/ApiController.java @@ -0,0 +1,50 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package example; + +import java.util.List; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.retry.annotation.Backoff; +import org.springframework.retry.annotation.Retryable; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RestController; +import software.amazon.jdbc.plugin.failover.FailoverSQLException; + +@RestController +public class ApiController { + + @Autowired + private ExampleService exampleService; + + @GetMapping(value = "/get") + @Retryable(value = {FailoverSQLException.class}, maxAttempts = 3, backoff = @Backoff(delay = 5000)) + public List<Example> get() { + return exampleService.get(); + } + + @GetMapping(value = "/add") + @Retryable(value = {FailoverSQLException.class}, maxAttempts = 3, backoff = @Backoff(delay = 5000)) + public void add() { + exampleService.add(); + } + + @GetMapping(value = "/delete") + @Retryable(value = {FailoverSQLException.class}, maxAttempts = 3, backoff = @Backoff(delay = 5000)) + public void delete() { + exampleService.delete(); + } +} diff --git a/examples/SpringTxFailoverExample/src/main/java/example/Example.java b/examples/SpringTxFailoverExample/src/main/java/example/Example.java new file mode 100644 index 000000000..0e00fb2f3 --- /dev/null +++ b/examples/SpringTxFailoverExample/src/main/java/example/Example.java @@ -0,0 +1,49 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package example; + +public class Example { + + private int id; + + private int status; + + public Example() { + super(); + } + + public int getId() { + return id; + } + + public void setId(int id) { + this.id = id; + } + + public int getStatus() { + return status; + } + + public void setStatus(int status) { + this.status = status; + } + + @Override + public String toString() { + return String.format("Example [id=%s, status=%s]", id, status); + } +} diff --git a/examples/SpringTxFailoverExample/src/main/java/example/ExampleConfiguration.java b/examples/SpringTxFailoverExample/src/main/java/example/ExampleConfiguration.java new file mode 100644 index 000000000..bb39e7383 --- /dev/null +++ b/examples/SpringTxFailoverExample/src/main/java/example/ExampleConfiguration.java @@ -0,0 +1,37 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package example; + +import javax.sql.DataSource; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.jdbc.datasource.DataSourceTransactionManager; +import org.springframework.retry.annotation.EnableRetry; + +@Configuration +@EnableRetry +public class ExampleConfiguration { + + @Autowired + private DataSource dataSource; + + @Bean + public DataSourceTransactionManager getDataSourceTransactionManager() { + return new DataSourceTransactionManager(dataSource); + } +} diff --git a/examples/SpringTxFailoverExample/src/main/java/example/ExampleDao.java b/examples/SpringTxFailoverExample/src/main/java/example/ExampleDao.java new file mode 100644 index 000000000..a094b5d22 --- /dev/null +++ b/examples/SpringTxFailoverExample/src/main/java/example/ExampleDao.java @@ -0,0 +1,28 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package example; + +import java.util.List; +import java.util.Map; + +public interface ExampleDao { + public void create(Example example); + + public void delete(); + + public List<Map<String, Object>> getAll(); +} diff --git a/examples/SpringTxFailoverExample/src/main/java/example/ExampleDaoImpl.java b/examples/SpringTxFailoverExample/src/main/java/example/ExampleDaoImpl.java new file mode 100644 index 000000000..ba243747d --- /dev/null +++ b/examples/SpringTxFailoverExample/src/main/java/example/ExampleDaoImpl.java @@ -0,0 +1,50 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package example; + +import java.util.List; +import java.util.Map; +import javax.sql.DataSource; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.jdbc.core.JdbcTemplate; +import org.springframework.stereotype.Component; + +@Component +public class ExampleDaoImpl implements ExampleDao { + @Autowired + private DataSource dataSource; + + @Override + public List<Map<String, Object>> getAll() { + final String sql = "SELECT * FROM EXAMPLE"; + final JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource); + return jdbcTemplate.queryForList(sql); + } + + @Override + public void create(Example example) { + final String sql = "INSERT INTO EXAMPLE (ID, STATUS) VALUES (?,?)"; + final JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource); + jdbcTemplate.update(sql, example.getId(), example.getStatus()); + } + + @Override + public void delete() { + final JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource); + jdbcTemplate.execute("TRUNCATE EXAMPLE"); + } +} diff --git a/examples/SpringTxFailoverExample/src/main/java/example/ExampleService.java b/examples/SpringTxFailoverExample/src/main/java/example/ExampleService.java new file mode 100644 index 000000000..876f295a6 --- /dev/null +++ b/examples/SpringTxFailoverExample/src/main/java/example/ExampleService.java @@ -0,0 +1,62 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package example; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.retry.support.RetrySynchronizationManager; +import org.springframework.stereotype.Service; +import org.springframework.transaction.annotation.Transactional; + +@Service +@Transactional +public class ExampleService { + private final Logger logger = LoggerFactory.getLogger(this.getClass()); + + @Autowired + private ExampleDao exampleDao; + + public List<Example> get() { + logger.info("Retry Number : {}", RetrySynchronizationManager.getContext().getRetryCount()); + List<Map<String, Object>> rows = exampleDao.getAll(); + List<Example> examples = new ArrayList<>(); + for (Map<String, Object> row : rows) { + Example obj = new Example(); + obj.setId(((Integer) row.get("ID"))); + obj.setStatus((Integer) row.get("STATUS")); + examples.add(obj); + } + return examples; + } + + public void add() { + logger.info("Retry Number : {}", RetrySynchronizationManager.getContext().getRetryCount()); + final Example example = new Example(); + example.setId(0); + example.setStatus(0); + exampleDao.create(example); + } + + public void delete() { + logger.info("Retry Number : {}", RetrySynchronizationManager.getContext().getRetryCount()); + exampleDao.delete(); + } +} diff --git a/examples/SpringTxFailoverExample/src/main/java/example/SpringTxFailoverExampleApplication.java b/examples/SpringTxFailoverExample/src/main/java/example/SpringTxFailoverExampleApplication.java new file mode 100644 index 000000000..ed21f0a98 --- /dev/null +++ b/examples/SpringTxFailoverExample/src/main/java/example/SpringTxFailoverExampleApplication.java @@ -0,0 +1,27 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package example; + +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@SpringBootApplication +public class SpringTxFailoverExampleApplication { + public static void main(String[] args) { + SpringApplication.run(SpringTxFailoverExampleApplication.class, args); + } +} diff --git a/examples/SpringTxFailoverExample/src/main/resources/application.yml b/examples/SpringTxFailoverExample/src/main/resources/application.yml new file mode 100644 index 000000000..31f8417a8 --- /dev/null +++ b/examples/SpringTxFailoverExample/src/main/resources/application.yml @@ -0,0 +1,13 @@ +spring: + datasource: + url: jdbc:aws-wrapper:postgresql://db-identifier.cluster-XYZ.us-east-2.rds.amazonaws.com:5432/db + username: jane_doe + password: password + driver-class-name: software.amazon.jdbc.Driver + hikari: + exception-override-class-name: software.amazon.jdbc.util.HikariCPSQLException + max-lifetime: 1260000 + auto-commit: false + maximum-pool-size: 3 + data-source-properties: + keepSessionStateOnFailover: true diff --git a/examples/SpringWildflyExample/README.md b/examples/SpringWildflyExample/README.md index 7dd977c58..0776fd0af 100644 --- a/examples/SpringWildflyExample/README.md +++ b/examples/SpringWildflyExample/README.md @@ -5,7 +5,7 @@ In this tutorial, you will set up a Wildfly and Spring Boot application with the > Note: this tutorial was written using the following technologies: > - Spring Boot 2.7.1 > - Wildfly 26.1.1 Final -> - AWS Advanced JDBC Wrapper 2.2.2 +> - AWS Advanced JDBC Wrapper 2.2.4 > - Postgresql 42.5.4 > - Gradle 7 > - Java 11 @@ -38,7 +38,7 @@ Create a Gradle project with the following project hierarchy: │ └───main │ │ │───module.xml │ │ │───postgresql-42.5.4.jar - │ │ └───aws-advanced-jdbc-wrapper-2.2.2.jar + │ │ └───aws-advanced-jdbc-wrapper-2.2.4.jar └───standalone ├───configuration ├───amazon @@ -135,7 +135,7 @@ Since this example uses the PostgreSQL JDBC driver as the target driver, you nee - <resource-root path="aws-advanced-jdbc-wrapper-2.2.2.jar"/> + <resource-root path="aws-advanced-jdbc-wrapper-2.2.4.jar"/> diff --git a/examples/SpringWildflyExample/spring/build.gradle.kts b/examples/SpringWildflyExample/spring/build.gradle.kts index b11fb2096..375e064dd 100644 --- a/examples/SpringWildflyExample/spring/build.gradle.kts +++ b/examples/SpringWildflyExample/spring/build.gradle.kts @@ -16,14 +16,14 @@ plugins { id("org.springframework.boot") version "2.7.0" - id("io.spring.dependency-management") version "1.1.0" + id("io.spring.dependency-management") version "1.1.3" } dependencies { implementation("org.springframework.boot:spring-boot-starter-jdbc") implementation("org.springframework.boot:spring-boot-starter-web") runtimeOnly("org.springframework.boot:spring-boot-devtools") - implementation("org.postgresql:postgresql:42.5.4") - implementation("software.amazon.awssdk:rds:2.20.49") + implementation("org.postgresql:postgresql:42.6.0") + implementation("software.amazon.awssdk:rds:2.20.158") implementation(project(":aws-advanced-jdbc-wrapper")) } diff --git a/examples/SpringWildflyExample/wildfly/modules/software/amazon/jdbc/main/module.xml b/examples/SpringWildflyExample/wildfly/modules/software/amazon/jdbc/main/module.xml index eeac13ea6..72d15f803 100644 --- a/examples/SpringWildflyExample/wildfly/modules/software/amazon/jdbc/main/module.xml +++ b/examples/SpringWildflyExample/wildfly/modules/software/amazon/jdbc/main/module.xml @@ -19,7 +19,7 @@ - <resource-root path="aws-advanced-jdbc-wrapper-2.2.2.jar"/> + <resource-root path="aws-advanced-jdbc-wrapper-2.2.4.jar"/> diff --git a/examples/VertxExample/README.md b/examples/VertxExample/README.md index d2295fffe..a9a411d03 100644 --- a/examples/VertxExample/README.md +++
b/examples/VertxExample/README.md @@ -3,7 +3,7 @@ In this tutorial, you will set up a Vert.x application with the AWS JDBC Driver, and use the driver to execute some simple database operations on an Aurora PostgreSQL database. > Note: this tutorial was written using the following technologies: -> - AWS JDBC Wrapper 2.2.2 +> - AWS JDBC Wrapper 2.2.4 > - PostgreSQL 42.5.4 > - Java 8 > - Vert.x 4.4.2 diff --git a/examples/VertxExample/build.gradle.kts b/examples/VertxExample/build.gradle.kts index 3d9475aa9..6c26b2a78 100644 --- a/examples/VertxExample/build.gradle.kts +++ b/examples/VertxExample/build.gradle.kts @@ -33,13 +33,13 @@ application { } dependencies { - implementation(platform("io.vertx:vertx-stack-depchain:4.4.2")) + implementation(platform("io.vertx:vertx-stack-depchain:4.4.5")) implementation("io.vertx:vertx-core") implementation("io.vertx:vertx-config") implementation("io.vertx:vertx-jdbc-client") implementation("io.vertx:vertx-web") implementation("com.fasterxml.jackson.core:jackson-databind:2.15.2") - implementation("org.postgresql:postgresql:42.5.4") + implementation("org.postgresql:postgresql:42.6.0") implementation(project(":aws-advanced-jdbc-wrapper")) } diff --git a/gradle.properties b/gradle.properties index 44c9c2271..2b02dc8f3 100644 --- a/gradle.properties +++ b/gradle.properties @@ -14,6 +14,6 @@ aws-advanced-jdbc-wrapper.version.major=2 aws-advanced-jdbc-wrapper.version.minor=2 -aws-advanced-jdbc-wrapper.version.subminor=3 +aws-advanced-jdbc-wrapper.version.subminor=4 snapshot=false nexus.publish=true diff --git a/settings.gradle.kts b/settings.gradle.kts index 811fd0086..cff67448f 100644 --- a/settings.gradle.kts +++ b/settings.gradle.kts @@ -25,6 +25,7 @@ include( "springhibernate", "springwildfly", "springboothikariexample", + "springtxfailover", "vertxexample" ) @@ -35,6 +36,7 @@ project(":driverexample").projectDir = file("examples/AWSDriverExample") project(":springhibernate").projectDir = file("examples/SpringHibernateExample") project(":springwildfly").projectDir = file("examples/SpringWildflyExample/spring") project(":springboothikariexample").projectDir = file("examples/SpringBootHikariExample") +project(":springtxfailover").projectDir = file("examples/SpringTxFailoverExample") project(":vertxexample").projectDir = file("examples/VertxExample") pluginManagement { @@ -43,7 +45,7 @@ pluginManagement { fun PluginDependenciesSpec.idv(id: String, key: String = id) = id(id) version key.v() id("biz.aQute.bnd.builder") version "6.4.0" - id("com.github.spotbugs") version "5.0.+" + id("com.github.spotbugs") version "5.1.+" id("com.diffplug.spotless") version "6.13.0" // 6.13.0 is the last version that is compatible with Java 8 id("com.github.vlsi.gradle-extensions") version "1.+" id("com.github.vlsi.stage-vote-release") version "1.+" diff --git a/wrapper/build.gradle.kts b/wrapper/build.gradle.kts index 30ce968bb..50f9b7ca4 100644 --- a/wrapper/build.gradle.kts +++ b/wrapper/build.gradle.kts @@ -27,43 +27,43 @@ plugins { } dependencies { - implementation("org.checkerframework:checker-qual:3.26.0") - compileOnly("software.amazon.awssdk:rds:2.20.49") + implementation("org.checkerframework:checker-qual:3.39.0") + compileOnly("software.amazon.awssdk:rds:2.20.158") compileOnly("com.zaxxer:HikariCP:4.0.3") // Version 4.+ is compatible with Java 8 - compileOnly("software.amazon.awssdk:secretsmanager:2.20.105") + compileOnly("software.amazon.awssdk:secretsmanager:2.20.154") compileOnly("com.fasterxml.jackson.core:jackson-databind:2.15.2") - 
compileOnly("mysql:mysql-connector-java:8.0.31") - compileOnly("org.postgresql:postgresql:42.5.0") - compileOnly("org.mariadb.jdbc:mariadb-java-client:3.1.4") - compileOnly("org.osgi:org.osgi.core:4.3.0") + compileOnly("mysql:mysql-connector-java:8.0.33") + compileOnly("org.postgresql:postgresql:42.6.0") + compileOnly("org.mariadb.jdbc:mariadb-java-client:3.2.0") + compileOnly("org.osgi:org.osgi.core:6.0.0") - testImplementation("org.junit.platform:junit-platform-commons:1.9.0") - testImplementation("org.junit.platform:junit-platform-engine:1.9.3") + testImplementation("org.junit.platform:junit-platform-commons:1.10.0") + testImplementation("org.junit.platform:junit-platform-engine:1.10.0") testImplementation("org.junit.platform:junit-platform-launcher:1.10.0") testImplementation("org.junit.platform:junit-platform-suite-engine:1.10.0") - testImplementation("org.junit.jupiter:junit-jupiter-api:5.9.3") + testImplementation("org.junit.jupiter:junit-jupiter-api:5.10.0") testImplementation("org.junit.jupiter:junit-jupiter-params:5.10.0") testRuntimeOnly("org.junit.jupiter:junit-jupiter-engine") - testImplementation("org.apache.commons:commons-dbcp2:2.9.0") - testImplementation("org.postgresql:postgresql:42.5.0") - testImplementation("mysql:mysql-connector-java:8.0.31") - testImplementation("org.mariadb.jdbc:mariadb-java-client:3.1.4") + testImplementation("org.apache.commons:commons-dbcp2:2.10.0") + testImplementation("org.postgresql:postgresql:42.6.0") + testImplementation("mysql:mysql-connector-java:8.0.33") + testImplementation("org.mariadb.jdbc:mariadb-java-client:3.2.0") testImplementation("com.zaxxer:HikariCP:4.0.3") // Version 4.+ is compatible with Java 8 testImplementation("org.springframework.boot:spring-boot-starter-jdbc:2.7.13") // 2.7.13 is the last version compatible with Java 8 testImplementation("org.mockito:mockito-inline:4.11.0") // 4.11.0 is the last version compatible with Java 8 - testImplementation("software.amazon.awssdk:rds:2.20.49") + testImplementation("software.amazon.awssdk:rds:2.20.158") testImplementation("software.amazon.awssdk:ec2:2.20.105") - testImplementation("software.amazon.awssdk:secretsmanager:2.20.105") - testImplementation("org.testcontainers:testcontainers:1.18.3") - testImplementation("org.testcontainers:mysql:1.18.3") - testImplementation("org.testcontainers:postgresql:1.18.3") - testImplementation("org.testcontainers:mariadb:1.18.3") - testImplementation("org.testcontainers:junit-jupiter:1.17.4") - testImplementation("org.testcontainers:toxiproxy:1.18.3") + testImplementation("software.amazon.awssdk:secretsmanager:2.20.154") + testImplementation("org.testcontainers:testcontainers:1.19.0") + testImplementation("org.testcontainers:mysql:1.19.1") + testImplementation("org.testcontainers:postgresql:1.19.1") + testImplementation("org.testcontainers:mariadb:1.19.1") + testImplementation("org.testcontainers:junit-jupiter:1.19.1") + testImplementation("org.testcontainers:toxiproxy:1.19.1") testImplementation("eu.rekawek.toxiproxy:toxiproxy-java:2.1.7") - testImplementation("org.apache.poi:poi-ooxml:5.2.2") - testImplementation("org.slf4j:slf4j-simple:2.0.7") + testImplementation("org.apache.poi:poi-ooxml:5.2.4") + testImplementation("org.slf4j:slf4j-simple:2.0.9") testImplementation("com.fasterxml.jackson.core:jackson-databind:2.15.2") } diff --git a/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java index b7c413f6f..5071786de 100644 --- 
a/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java @@ -21,6 +21,7 @@ import java.util.List; import java.util.Properties; import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; import software.amazon.jdbc.dialect.Dialect; /** @@ -59,12 +60,13 @@ boolean acceptsUrl( * @param role determines if the connection provider should return a writer or a reader * @param strategy the strategy determining how the {@link HostSpec} should be selected, e.g., * random or round-robin + * @param props any properties that are required by the provided strategy to select a host * @return the {@link HostSpec} selected using the specified strategy * @throws SQLException if an error occurred while returning the hosts * @throws UnsupportedOperationException if the strategy is unsupported by the provider */ HostSpec getHostSpecByStrategy( - @NonNull List<HostSpec> hosts, @NonNull HostRole role, @NonNull String strategy) + @NonNull List<HostSpec> hosts, @NonNull HostRole role, @NonNull String strategy, @Nullable Properties props) throws SQLException, UnsupportedOperationException; /** diff --git a/wrapper/src/main/java/software/amazon/jdbc/ConnectionProviderManager.java b/wrapper/src/main/java/software/amazon/jdbc/ConnectionProviderManager.java index 74cadf021..384da5c7b 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/ConnectionProviderManager.java +++ b/wrapper/src/main/java/software/amazon/jdbc/ConnectionProviderManager.java @@ -134,6 +134,7 @@ public boolean acceptsStrategy(HostRole role, String strategy) { * @param role the desired role of the host - either a writer or a reader * @param strategy the strategy that should be used to select a {@link HostSpec} from the host * list (eg "random") + * @param props any properties that are required by the provided strategy to select a host * @return a {@link HostSpec} with the requested role * @throws SQLException if the available {@link ConnectionProvider} instances * cannot find a host in the host list matching the * @throws UnsupportedOperationException if the available {@link ConnectionProvider} instances do * not support the requested strategy */ - public HostSpec getHostSpecByStrategy(List<HostSpec> hosts, HostRole role, String strategy) + public HostSpec getHostSpecByStrategy(List<HostSpec> hosts, HostRole role, String strategy, Properties props) throws SQLException, UnsupportedOperationException { HostSpec host = null; if (connProvider != null) { connProviderLock.readLock().lock(); try { if (connProvider != null && connProvider.acceptsStrategy(role, strategy)) { - host = connProvider.getHostSpecByStrategy(hosts, role, strategy); + host = connProvider.getHostSpecByStrategy(hosts, role, strategy, props); } } catch (UnsupportedOperationException e) { // The custom provider does not support the provided strategy, ignore it and try with the default provider.
@@ -158,7 +159,7 @@ public HostSpec getHostSpecByStrategy(List<HostSpec> hosts, HostRole role, Strin } if (host == null) { - host = defaultProvider.getHostSpecByStrategy(hosts, role, strategy); + host = defaultProvider.getHostSpecByStrategy(hosts, role, strategy, props); } return host; diff --git a/wrapper/src/main/java/software/amazon/jdbc/DataSourceConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/DataSourceConnectionProvider.java index 76ed24ba3..523901916 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/DataSourceConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/DataSourceConnectionProvider.java @@ -27,6 +27,7 @@ import java.util.logging.Logger; import javax.sql.DataSource; import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; import software.amazon.jdbc.dialect.Dialect; import software.amazon.jdbc.exceptions.SQLLoginException; import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; @@ -46,7 +47,8 @@ public class DataSourceConnectionProvider implements ConnectionProvider { private static final Map<String, HostSelector> acceptedStrategies = Collections.unmodifiableMap(new HashMap<String, HostSelector>() { { - put("random", new RandomHostSelector()); + put(RandomHostSelector.STRATEGY_RANDOM, new RandomHostSelector()); + put(RoundRobinHostSelector.STRATEGY_ROUND_ROBIN, new RoundRobinHostSelector()); } }); private final @NonNull DataSource dataSource; @@ -86,7 +88,7 @@ public boolean acceptsStrategy(@NonNull HostRole role, @NonNull String strategy) @Override public HostSpec getHostSpecByStrategy( - @NonNull List<HostSpec> hosts, @NonNull HostRole role, @NonNull String strategy) + @NonNull List<HostSpec> hosts, @NonNull HostRole role, @NonNull String strategy, @Nullable Properties props) throws SQLException { if (!acceptedStrategies.containsKey(strategy)) { throw new UnsupportedOperationException( @@ -95,7 +97,7 @@ public HostSpec getHostSpecByStrategy( new Object[] {strategy, DataSourceConnectionProvider.class})); } - return acceptedStrategies.get(strategy).getHost(hosts, role); + return acceptedStrategies.get(strategy).getHost(hosts, role, props); } /** diff --git a/wrapper/src/main/java/software/amazon/jdbc/DriverConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/DriverConnectionProvider.java index 18afdc8ab..f1af5aeea 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/DriverConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/DriverConnectionProvider.java @@ -25,6 +25,7 @@ import java.util.Properties; import java.util.logging.Logger; import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; import software.amazon.jdbc.dialect.Dialect; import software.amazon.jdbc.exceptions.SQLLoginException; import software.amazon.jdbc.targetdriverdialect.ConnectInfo; @@ -43,7 +44,8 @@ public class DriverConnectionProvider implements ConnectionProvider { private static final Map<String, HostSelector> acceptedStrategies = Collections.unmodifiableMap(new HashMap<String, HostSelector>() { { - put("random", new RandomHostSelector()); + put(RandomHostSelector.STRATEGY_RANDOM, new RandomHostSelector()); + put(RoundRobinHostSelector.STRATEGY_ROUND_ROBIN, new RoundRobinHostSelector()); } }); @@ -81,7 +83,7 @@ public boolean acceptsStrategy(@NonNull HostRole role, @NonNull String strategy) @Override public HostSpec getHostSpecByStrategy( - @NonNull List<HostSpec> hosts, @NonNull HostRole role, @NonNull String strategy) + @NonNull List<HostSpec> hosts, @NonNull HostRole role, @NonNull String strategy, @Nullable
Properties props) throws SQLException { if (!acceptedStrategies.containsKey(strategy)) { throw new UnsupportedOperationException( @@ -90,7 +92,7 @@ public HostSpec getHostSpecByStrategy( new Object[] {strategy, DriverConnectionProvider.class})); } - return acceptedStrategies.get(strategy).getHost(hosts, role); + return acceptedStrategies.get(strategy).getHost(hosts, role, props); } /** diff --git a/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java index 127129caa..e0826fc57 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java @@ -21,8 +21,9 @@ import java.sql.Connection; import java.sql.SQLException; import java.util.Collections; +import java.util.HashMap; import java.util.List; -import java.util.Map.Entry; +import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.StringJoiner; @@ -30,6 +31,7 @@ import java.util.logging.Logger; import java.util.stream.Collectors; import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; import software.amazon.jdbc.cleanup.CanReleaseResources; import software.amazon.jdbc.dialect.Dialect; import software.amazon.jdbc.util.HikariCPSQLException; @@ -44,9 +46,13 @@ public class HikariPooledConnectionProvider implements PooledConnectionProvider, private static final Logger LOGGER = Logger.getLogger(HikariPooledConnectionProvider.class.getName()); - - private static final String LEAST_CONNECTIONS_STRATEGY = "leastConnections"; - + private static final Map<String, HostSelector> acceptedStrategies = + Collections.unmodifiableMap(new HashMap<String, HostSelector>() { + { + put(RandomHostSelector.STRATEGY_RANDOM, new RandomHostSelector()); + put(RoundRobinHostSelector.STRATEGY_ROUND_ROBIN, new RoundRobinHostSelector()); + } + }); private static final RdsUtils rdsUtils = new RdsUtils(); private static SlidingExpirationCache<PoolKey, HikariDataSource> databasePools = new SlidingExpirationCache<>( @@ -56,6 +62,7 @@ public class HikariPooledConnectionProvider implements PooledConnectionProvider, private static long poolExpirationCheckNanos = TimeUnit.MINUTES.toNanos(30); private final HikariPoolConfigurator poolConfigurator; private final HikariPoolMapping poolMapping; + private final LeastConnectionsHostSelector leastConnectionsHostSelector; /** * {@link HikariPooledConnectionProvider} constructor.
This class can be passed to @@ -98,6 +105,7 @@ public HikariPooledConnectionProvider( HikariPoolConfigurator hikariPoolConfigurator, HikariPoolMapping mapping) { this.poolConfigurator = hikariPoolConfigurator; this.poolMapping = mapping; + this.leastConnectionsHostSelector = new LeastConnectionsHostSelector(databasePools); } /** @@ -134,6 +142,7 @@ public HikariPooledConnectionProvider( this.poolMapping = mapping; poolExpirationCheckNanos = poolExpirationNanos; databasePools.setCleanupIntervalNanos(poolCleanupNanos); + this.leastConnectionsHostSelector = new LeastConnectionsHostSelector(databasePools); } @Override @@ -145,44 +154,28 @@ public boolean acceptsUrl( @Override public boolean acceptsStrategy(@NonNull HostRole role, @NonNull String strategy) { - return LEAST_CONNECTIONS_STRATEGY.equals(strategy); + return acceptedStrategies.containsKey(strategy) + || LeastConnectionsHostSelector.STRATEGY_LEAST_CONNECTIONS.equals(strategy); } @Override public HostSpec getHostSpecByStrategy( - @NonNull List<HostSpec> hosts, @NonNull HostRole role, @NonNull String strategy) - throws SQLException { - if (!LEAST_CONNECTIONS_STRATEGY.equals(strategy)) { + @NonNull List<HostSpec> hosts, + @NonNull HostRole role, + @NonNull String strategy, + @Nullable Properties props) throws SQLException { + if (!acceptsStrategy(role, strategy)) { throw new UnsupportedOperationException( Messages.get( "ConnectionProvider.unsupportedHostSpecSelectorStrategy", - new Object[] {strategy, HikariPooledConnectionProvider.class})); - } - - // Remove hosts with the wrong role - List<HostSpec> eligibleHosts = hosts.stream() - .filter(hostSpec -> role.equals(hostSpec.getRole())) - .sorted((hostSpec1, hostSpec2) -> - getNumConnections(hostSpec1) - getNumConnections(hostSpec2)) - .collect(Collectors.toList()); - - if (eligibleHosts.size() == 0) { - throw new SQLException(Messages.get("HostSelector.noHostsMatchingRole", new Object[]{role})); + new Object[] {strategy, HikariPooledConnectionProvider.class})); } - return eligibleHosts.get(0); - } - - private int getNumConnections(HostSpec hostSpec) { - int numConnections = 0; - final String url = hostSpec.getUrl(); - for (Entry<PoolKey, HikariDataSource> entry : databasePools.getEntries().entrySet()) { - if (!url.equals(entry.getKey().url)) { - continue; - } - numConnections += entry.getValue().getHikariPoolMXBean().getActiveConnections(); + if (LeastConnectionsHostSelector.STRATEGY_LEAST_CONNECTIONS.equals(strategy)) { + return this.leastConnectionsHostSelector.getHost(hosts, role, props); + } else { + return acceptedStrategies.get(strategy).getHost(hosts, role, props); } - return numConnections; } @Override @@ -338,6 +331,10 @@ public PoolKey(final @NonNull String url, final @NonNull String extraKey) { this.extraKey = extraKey; } + public String getUrl() { + return this.url; + } + @Override public int hashCode() { final int prime = 31; diff --git a/wrapper/src/main/java/software/amazon/jdbc/HostSelector.java b/wrapper/src/main/java/software/amazon/jdbc/HostSelector.java index 40dcc520f..72c4e597b 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/HostSelector.java +++ b/wrapper/src/main/java/software/amazon/jdbc/HostSelector.java @@ -18,17 +18,22 @@ import java.sql.SQLException; import java.util.List; +import java.util.Properties; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; public interface HostSelector { /** * Selects a host with the requested role from the given host list.
* - * @param hosts a list of available hosts to pick from - * @param role the desired host role - either a writer or a reader + * @param hosts a list of available hosts to pick from. + * @param role the desired host role - either a writer or a reader. + * @param props connection properties that may be needed by the host selector in order to choose a host. * @return a host matching the requested role * @throws SQLException if the host list does not contain any hosts matching the requested role or * an error occurs while selecting a host */ - HostSpec getHost(List<HostSpec> hosts, HostRole role) throws SQLException; + HostSpec getHost( + @NonNull List<HostSpec> hosts, @NonNull HostRole role, @Nullable Properties props) throws SQLException; } diff --git a/wrapper/src/main/java/software/amazon/jdbc/LeastConnectionsHostSelector.java b/wrapper/src/main/java/software/amazon/jdbc/LeastConnectionsHostSelector.java new file mode 100644 index 000000000..ec5299415 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/LeastConnectionsHostSelector.java @@ -0,0 +1,71 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc; + +import com.zaxxer.hikari.HikariDataSource; +import java.sql.SQLException; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.stream.Collectors; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; +import software.amazon.jdbc.util.Messages; +import software.amazon.jdbc.util.SlidingExpirationCache; + +public class LeastConnectionsHostSelector implements HostSelector { + public static final String STRATEGY_LEAST_CONNECTIONS = "leastConnections"; + private final SlidingExpirationCache<HikariPooledConnectionProvider.PoolKey, HikariDataSource> databasePools; + + public LeastConnectionsHostSelector( + SlidingExpirationCache<HikariPooledConnectionProvider.PoolKey, HikariDataSource> databasePools) { + this.databasePools = databasePools; + } + + @Override + public HostSpec getHost( + @NonNull final List<HostSpec> hosts, + @NonNull final HostRole role, + @Nullable final Properties props) throws SQLException { + final List<HostSpec> eligibleHosts = hosts.stream() + .filter(hostSpec -> role.equals(hostSpec.getRole())) + .sorted((hostSpec1, hostSpec2) -> + getNumConnections(hostSpec1, this.databasePools) - getNumConnections(hostSpec2, this.databasePools)) + .collect(Collectors.toList()); + + if (eligibleHosts.size() == 0) { + throw new SQLException(Messages.get("HostSelector.noHostsMatchingRole", new Object[]{role})); + } + + return eligibleHosts.get(0); + } + + private int getNumConnections( + final HostSpec hostSpec, + final SlidingExpirationCache<HikariPooledConnectionProvider.PoolKey, HikariDataSource> databasePools) { + int numConnections = 0; + final String url = hostSpec.getUrl(); + for (final Map.Entry<HikariPooledConnectionProvider.PoolKey, HikariDataSource> entry : + databasePools.getEntries().entrySet()) { + if (!url.equals(entry.getKey().getUrl())) { + continue; + } + numConnections += entry.getValue().getHikariPoolMXBean().getActiveConnections(); + } + return numConnections; + } +} diff --git
a/wrapper/src/main/java/software/amazon/jdbc/PluginService.java b/wrapper/src/main/java/software/amazon/jdbc/PluginService.java index d7edd90fe..02d6f2370 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/PluginService.java +++ b/wrapper/src/main/java/software/amazon/jdbc/PluginService.java @@ -158,4 +158,10 @@ HostSpec getHostSpecByStrategy(HostRole role, String strategy) void fillAliases(final Connection connection, final HostSpec hostSpec) throws SQLException; HostSpecBuilder getHostSpecBuilder(); + + ConnectionProvider getConnectionProvider(); + + String getDriverProtocol(); + + Properties getProperties(); } diff --git a/wrapper/src/main/java/software/amazon/jdbc/PluginServiceImpl.java b/wrapper/src/main/java/software/amazon/jdbc/PluginServiceImpl.java index b53d1dce2..f88a81b5b 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/PluginServiceImpl.java +++ b/wrapper/src/main/java/software/amazon/jdbc/PluginServiceImpl.java @@ -153,6 +153,16 @@ private HostSpec getWriter(final @NonNull List<HostSpec> hosts) { return null; } + @Override + public ConnectionProvider getConnectionProvider() { + return this.pluginManager.defaultConnProvider; + } + + @Override + public String getDriverProtocol() { + return this.driverProtocol; + } + @Override public void setCurrentConnection( final @NonNull Connection connection, final @NonNull HostSpec hostSpec) throws SQLException { @@ -543,4 +553,9 @@ public void fillAliases(Connection connection, HostSpec hostSpec) throws SQLExce public HostSpecBuilder getHostSpecBuilder() { return new HostSpecBuilder(new HostAvailabilityStrategyFactory().create(this.props)); } + + @Override + public Properties getProperties() { + return this.props; + } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/RandomHostSelector.java b/wrapper/src/main/java/software/amazon/jdbc/RandomHostSelector.java index c9bd5af1e..29d3afbaf 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/RandomHostSelector.java +++ b/wrapper/src/main/java/software/amazon/jdbc/RandomHostSelector.java @@ -18,21 +18,29 @@ import java.sql.SQLException; import java.util.List; +import java.util.Properties; import java.util.Random; import java.util.stream.Collectors; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; import software.amazon.jdbc.util.Messages; public class RandomHostSelector implements HostSelector { + public static final String STRATEGY_RANDOM = "random"; + @Override - public HostSpec getHost(List<HostSpec> hosts, HostRole role) throws SQLException { - List<HostSpec> eligibleHosts = hosts.stream() + public HostSpec getHost( + @NonNull final List<HostSpec> hosts, + @NonNull final HostRole role, + @Nullable final Properties props) throws SQLException { + final List<HostSpec> eligibleHosts = hosts.stream() .filter(hostSpec -> role.equals(hostSpec.getRole())).collect(Collectors.toList()); if (eligibleHosts.size() == 0) { - throw new SQLException(Messages.get("RandomHostSelector.noHostsMatchingRole", new Object[]{role})); + throw new SQLException(Messages.get("HostSelector.noHostsMatchingRole", new Object[]{role})); } - int randomIndex = new Random().nextInt(eligibleHosts.size()); + final int randomIndex = new Random().nextInt(eligibleHosts.size()); return eligibleHosts.get(randomIndex); } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/RoundRobinHostSelector.java b/wrapper/src/main/java/software/amazon/jdbc/RoundRobinHostSelector.java new file mode 100644 index 000000000..5e284b546 --- /dev/null +++
b/wrapper/src/main/java/software/amazon/jdbc/RoundRobinHostSelector.java @@ -0,0 +1,200 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc; + +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Properties; +import java.util.concurrent.TimeUnit; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; +import software.amazon.jdbc.util.CacheMap; +import software.amazon.jdbc.util.Messages; +import software.amazon.jdbc.util.StringUtils; + +public class RoundRobinHostSelector implements HostSelector { + public static final AwsWrapperProperty ROUND_ROBIN_HOST_WEIGHT_PAIRS = new AwsWrapperProperty( + "roundRobinHostWeightPairs", null, + "Comma separated list of database host-weight pairs in the format of `<host>:<weight>`."); + public static final AwsWrapperProperty ROUND_ROBIN_DEFAULT_WEIGHT = new AwsWrapperProperty( + "roundRobinDefaultWeight", "1", + "The default weight for any hosts that have not been configured with the `roundRobinHostWeightPairs` parameter."); + public static final String STRATEGY_ROUND_ROBIN = "roundRobin"; + private static final int DEFAULT_WEIGHT = 1; + private static final long DEFAULT_ROUND_ROBIN_CACHE_EXPIRE_NANO = TimeUnit.MINUTES.toNanos(10); + static final Pattern HOST_WEIGHT_PAIRS_PATTERN = + Pattern.compile( + "((?<host>[^:/?#]*):(?<weight>[0-9]*))"); + private static final CacheMap<String, RoundRobinClusterInfo> roundRobinCache = new CacheMap<>(); + + static { + PropertyDefinition.registerPluginProperties(RoundRobinHostSelector.class); + } + + @Override + public synchronized HostSpec getHost( + final @NonNull List<HostSpec> hosts, + final @NonNull HostRole role, + final @Nullable Properties props) throws SQLException { + final List<HostSpec> eligibleHosts = hosts.stream() + .filter(hostSpec -> role.equals(hostSpec.getRole())) + .sorted(Comparator.comparing(HostSpec::getHost)) + .collect(Collectors.toList()); + + if (eligibleHosts.isEmpty()) { + throw new SQLException(Messages.get("HostSelector.noHostsMatchingRole", new Object[]{role})); + } + + // Create new cache entries for provided hosts if necessary. All hosts point to the same cluster info. + createCacheEntryForHosts(eligibleHosts, props); + final String currentClusterInfoKey = eligibleHosts.get(0).getHost(); + final RoundRobinClusterInfo clusterInfo = roundRobinCache.get(currentClusterInfoKey); + + final HostSpec lastHost = clusterInfo.lastHost; + int lastHostIndex = -1; + + // Check if lastHost is in list of eligible hosts. Update lastHostIndex.
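The weighted rotation driven by the two properties above is configured entirely through connection properties. A brief sketch under stated assumptions (`readerHosts` stands in for a topology-derived `List<HostSpec>`; the instance names are placeholders):

```java
import java.sql.SQLException;
import java.util.List;
import java.util.Properties;
import software.amazon.jdbc.HostRole;
import software.amazon.jdbc.HostSpec;
import software.amazon.jdbc.RoundRobinHostSelector;

final class RoundRobinSketch {
  // Rotates through readers, sending roughly three consecutive selections to
  // instance-1 for every one selection of instance-2 (names are placeholders).
  static HostSpec nextReader(final List<HostSpec> readerHosts) throws SQLException {
    final Properties props = new Properties();
    props.setProperty("roundRobinHostWeightPairs", "instance-1:3,instance-2:1");
    props.setProperty("roundRobinDefaultWeight", "1"); // applies to unlisted hosts
    // Rotation state lives in the static round-robin cache, so a fresh selector
    // instance continues the rotation where the previous call left off.
    return new RoundRobinHostSelector().getHost(readerHosts, HostRole.READER, props);
  }
}
```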
+ if (lastHost != null) { + for (int i = 0; i < eligibleHosts.size(); i++) { + if (eligibleHosts.get(i).getHost().equals(lastHost.getHost())) { + lastHostIndex = i; + } + } + } + + final int targetHostIndex; + // If the host is weighted and the lastHost is in the eligibleHosts list. + if (clusterInfo.weightCounter > 0 && lastHostIndex != -1) { + targetHostIndex = lastHostIndex; + } else { + if (lastHostIndex != -1 && lastHostIndex != eligibleHosts.size() - 1) { + targetHostIndex = lastHostIndex + 1; + } else { + targetHostIndex = 0; + } + + final Integer weight = clusterInfo.clusterWeightsMap.get(eligibleHosts.get(targetHostIndex).getHost()); + clusterInfo.weightCounter = weight == null ? clusterInfo.defaultWeight : weight; + } + + clusterInfo.weightCounter--; + clusterInfo.lastHost = eligibleHosts.get(targetHostIndex); + + return eligibleHosts.get(targetHostIndex); + } + + private void createCacheEntryForHosts( + final @NonNull List<HostSpec> hosts, + final @Nullable Properties props) + throws SQLException { + final List<HostSpec> hostsMissingCacheEntry = new ArrayList<>(); + final List<HostSpec> hostsWithCacheEntry = new ArrayList<>(); + for (final HostSpec host : hosts) { + if (roundRobinCache.get(host.getHost()) != null) { + hostsWithCacheEntry.add(host); + } else { + hostsMissingCacheEntry.add(host); + } + } + + if ((hostsMissingCacheEntry.isEmpty() && !hostsWithCacheEntry.isEmpty())) { + for (final HostSpec host : hosts) { + roundRobinCache.put( + host.getHost(), + roundRobinCache.get(hostsWithCacheEntry.get(0).getHost()), + DEFAULT_ROUND_ROBIN_CACHE_EXPIRE_NANO); + } + } else if (hostsWithCacheEntry.isEmpty()) { + final RoundRobinClusterInfo roundRobinClusterInfo = new RoundRobinClusterInfo(); + updateCachePropertiesForRoundRobinClusterInfo(roundRobinClusterInfo, props); + for (final HostSpec host : hostsMissingCacheEntry) { + roundRobinCache.put( + host.getHost(), + roundRobinClusterInfo, + DEFAULT_ROUND_ROBIN_CACHE_EXPIRE_NANO); + } + } + } + + private void updateCachePropertiesForRoundRobinClusterInfo( + final @NonNull RoundRobinClusterInfo roundRobinClusterInfo, + final @Nullable Properties props) throws SQLException { + int defaultWeight = DEFAULT_WEIGHT; + if (props != null) { + final String defaultWeightString = ROUND_ROBIN_DEFAULT_WEIGHT.getString(props); + if (!StringUtils.isNullOrEmpty(defaultWeightString)) { + try { + final int parsedWeight = Integer.parseInt(defaultWeightString); + if (parsedWeight < DEFAULT_WEIGHT) { + throw new SQLException(Messages.get("HostSelector.roundRobinInvalidDefaultWeight")); + } + defaultWeight = parsedWeight; + } catch (NumberFormatException e) { + throw new SQLException(Messages.get("HostSelector.roundRobinInvalidDefaultWeight")); + } + } + } + roundRobinClusterInfo.defaultWeight = defaultWeight; + + if (props != null) { + final String hostWeights = ROUND_ROBIN_HOST_WEIGHT_PAIRS.getString(props); + if (!StringUtils.isNullOrEmpty(hostWeights)) { + final String[] hostWeightPairs = hostWeights.split(","); + for (final String pair : hostWeightPairs) { + final Matcher matcher = HOST_WEIGHT_PAIRS_PATTERN.matcher(pair); + if (!matcher.matches()) { + throw new SQLException(Messages.get("HostSelector.roundRobinInvalidHostWeightPairs")); + } + + final String hostName = matcher.group("host").trim(); + final String hostWeight = matcher.group("weight").trim(); + if (hostName.isEmpty() || hostWeight.isEmpty()) { + throw new SQLException(Messages.get("HostSelector.roundRobinInvalidHostWeightPairs")); + } + + try { + final int weight = Integer.parseInt(hostWeight); + if (weight <
DEFAULT_WEIGHT) { + throw new SQLException(Messages.get("HostSelector.roundRobinInvalidHostWeightPairs")); + } + roundRobinClusterInfo.clusterWeightsMap.put(hostName, weight); + } catch (NumberFormatException e) { + throw new SQLException(Messages.get("HostSelector.roundRobinInvalidHostWeightPairs")); + } + } + } + } + } + + // For testing purposes only + public void clearCache() { + roundRobinCache.clear(); + } + + public static class RoundRobinClusterInfo { + public HostSpec lastHost; + public HashMap<String, Integer> clusterWeightsMap = new HashMap<>(); + public int defaultWeight = 1; + public int weightCounter = 0; + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraPgDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraPgDialect.java index 0947e1032..d6a83ae35 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraPgDialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraPgDialect.java @@ -42,7 +42,8 @@ public class AuroraPgDialect extends PgDialect { + "CPU, COALESCE(REPLICA_LAG_IN_MSEC, 0), LAST_UPDATE_TIMESTAMP " + "FROM aurora_replica_status() " // filter out nodes that haven't been updated in the last 5 minutes - + "WHERE EXTRACT(EPOCH FROM(NOW() - LAST_UPDATE_TIMESTAMP)) <= 300 OR SESSION_ID = 'MASTER_SESSION_ID' "; + + "WHERE EXTRACT(EPOCH FROM(NOW() - LAST_UPDATE_TIMESTAMP)) <= 300 OR SESSION_ID = 'MASTER_SESSION_ID' " + + "OR LAST_UPDATE_TIMESTAMP IS NULL"; private static final String NODE_ID_QUERY = "SELECT aurora_db_instance_identifier()"; private static final String IS_READER_QUERY = "SELECT pg_is_in_recovery()"; diff --git a/wrapper/src/main/java/software/amazon/jdbc/ds/AwsWrapperDataSource.java b/wrapper/src/main/java/software/amazon/jdbc/ds/AwsWrapperDataSource.java index 359b52ed3..6838971e4 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/ds/AwsWrapperDataSource.java +++ b/wrapper/src/main/java/software/amazon/jdbc/ds/AwsWrapperDataSource.java @@ -78,6 +78,7 @@ public class AwsWrapperDataSource implements DataSource, Referenceable, Serializ protected @Nullable String serverName; protected @Nullable String serverPort; protected @Nullable String database; + private int loginTimeout = 0; @Override public Connection getConnection() throws SQLException { @@ -153,6 +154,16 @@ public Connection getConnection(final String username, final String password) th if (!StringUtils.isNullOrEmpty(this.targetDataSourceClassName)) { final DataSource targetDataSource = createTargetDataSource(); + try { + targetDataSource.setLoginTimeout(loginTimeout); + } catch (Exception ex) { + LOGGER.finest( + () -> + Messages.get( + "DataSource.failedToSetProperty", + new Object[] {"loginTimeout", targetDataSource.getClass(), ex.getCause().getMessage()})); + } + final TargetDriverDialectManager targetDriverDialectManager = new TargetDriverDialectManager(); final TargetDriverDialect targetDriverDialect = targetDriverDialectManager.getDialect(this.targetDataSourceClassName, props); @@ -284,12 +295,15 @@ public void setLogWriter(final PrintWriter out) throws SQLException { @Override public void setLoginTimeout(final int seconds) throws SQLException { - throw new SQLFeatureNotSupportedException(); + if (seconds < 0) { + throw new SQLException("Login timeout cannot be a negative value."); + } + loginTimeout = seconds; } @Override public int getLoginTimeout() throws SQLException { - throw new SQLFeatureNotSupportedException(); + return loginTimeout; } @Override @@ -330,7 +344,7 @@ private void setCredentialProperties(final Properties
props) { } } - private DataSource createTargetDataSource() throws SQLException { + DataSource createTargetDataSource() throws SQLException { try { return WrapperUtils.createInstance(this.targetDataSourceClassName, DataSource.class); } catch (final InstantiationException instEx) { diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/AuroraHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/AuroraHostListProvider.java index 1876f7485..1d872c026 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/AuroraHostListProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/AuroraHostListProvider.java @@ -16,112 +16,15 @@ package software.amazon.jdbc.hostlistprovider; -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.SQLSyntaxErrorException; -import java.sql.Statement; -import java.sql.Timestamp; -import java.time.Instant; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map.Entry; -import java.util.Objects; + import java.util.Properties; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.Executor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.ReentrantLock; import java.util.logging.Logger; -import java.util.stream.Collectors; -import org.checkerframework.checker.nullness.qual.NonNull; -import org.checkerframework.checker.nullness.qual.Nullable; -import software.amazon.jdbc.AwsWrapperProperty; import software.amazon.jdbc.HostListProviderService; -import software.amazon.jdbc.HostRole; -import software.amazon.jdbc.HostSpec; -import software.amazon.jdbc.HostSpecBuilder; -import software.amazon.jdbc.PropertyDefinition; -import software.amazon.jdbc.hostavailability.HostAvailability; -import software.amazon.jdbc.util.CacheMap; -import software.amazon.jdbc.util.ConnectionUrlParser; -import software.amazon.jdbc.util.Messages; -import software.amazon.jdbc.util.RdsUrlType; -import software.amazon.jdbc.util.RdsUtils; -import software.amazon.jdbc.util.StringUtils; -import software.amazon.jdbc.util.SynchronousExecutor; -import software.amazon.jdbc.util.Utils; - -public class AuroraHostListProvider implements DynamicHostListProvider { - - public static final AwsWrapperProperty CLUSTER_TOPOLOGY_REFRESH_RATE_MS = - new AwsWrapperProperty( - "clusterTopologyRefreshRateMs", - "30000", - "Cluster topology refresh rate in millis. " - + "The cached topology for the cluster will be invalidated after the specified time, " - + "after which it will be updated during the next interaction with the connection."); - - public static final AwsWrapperProperty CLUSTER_ID = new AwsWrapperProperty( - "clusterId", "", - "A unique identifier for the cluster. " - + "Connections with the same cluster id share a cluster topology cache. " - + "If unspecified, a cluster id is automatically created for AWS RDS clusters."); - - public static final AwsWrapperProperty CLUSTER_INSTANCE_HOST_PATTERN = - new AwsWrapperProperty( - "clusterInstanceHostPattern", - null, - "The cluster instance DNS pattern that will be used to build a complete instance endpoint. " - + "A \"?\" character in this pattern should be used as a placeholder for cluster instance names. " - + "This pattern is required to be specified for IP address or custom domain connections to AWS RDS " - + "clusters. 
Otherwise, if unspecified, the pattern will be automatically created for AWS RDS clusters."); - - private final Executor networkTimeoutExecutor = new SynchronousExecutor(); - private final HostListProviderService hostListProviderService; - private final String originalUrl; - private final String topologyQuery; - private final String nodeIdQuery; - private final String isReaderQuery; - private RdsUrlType rdsUrlType; - private final RdsUtils rdsHelper; - - private long refreshRateNano = CLUSTER_TOPOLOGY_REFRESH_RATE_MS.defaultValue != null - ? TimeUnit.MILLISECONDS.toNanos(Long.parseLong(CLUSTER_TOPOLOGY_REFRESH_RATE_MS.defaultValue)) - : TimeUnit.MILLISECONDS.toNanos(30000); - private final long suggestedClusterIdRefreshRateNano = TimeUnit.MINUTES.toNanos(10); - private List hostList = new ArrayList<>(); - private List initialHostList = new ArrayList<>(); - private HostSpec initialHostSpec; - - public static final CacheMap> topologyCache = new CacheMap<>(); - public static final CacheMap suggestedPrimaryClusterIdCache = new CacheMap<>(); - public static final CacheMap primaryClusterIdCache = new CacheMap<>(); - private static final int defaultTopologyQueryTimeoutMs = 5000; - private final ReentrantLock lock = new ReentrantLock(); - protected String clusterId; - protected HostSpec clusterInstanceTemplate; - protected ConnectionUrlParser connectionUrlParser; - // A primary clusterId is a clusterId that is based off of a cluster endpoint URL - // (rather than a GUID or a value provided by the user). - protected boolean isPrimaryClusterId; +public class AuroraHostListProvider extends RdsHostListProvider { - protected boolean isInitialized = false; - - private static final Logger LOGGER = Logger.getLogger(AuroraHostListProvider.class.getName()); - - Properties properties; - - static { - PropertyDefinition.registerPluginProperties(AuroraHostListProvider.class); - } + static final Logger LOGGER = Logger.getLogger(AuroraHostListProvider.class.getName()); public AuroraHostListProvider( final Properties properties, @@ -130,557 +33,11 @@ public AuroraHostListProvider( final String topologyQuery, final String nodeIdQuery, final String isReaderQuery) { - this(hostListProviderService, - properties, + super(properties, originalUrl, + hostListProviderService, topologyQuery, nodeIdQuery, - isReaderQuery, - new ConnectionUrlParser()); - } - - public AuroraHostListProvider( - final HostListProviderService hostListProviderService, - final Properties properties, - final String originalUrl, - final String topologyQuery, - final String nodeIdQuery, - final String isReaderQuery, - final ConnectionUrlParser connectionUrlParser) { - this.rdsHelper = new RdsUtils(); - this.hostListProviderService = hostListProviderService; - this.properties = properties; - this.originalUrl = originalUrl; - this.topologyQuery = topologyQuery; - this.nodeIdQuery = nodeIdQuery; - this.isReaderQuery = isReaderQuery; - this.connectionUrlParser = connectionUrlParser; - } - - protected void init() throws SQLException { - if (this.isInitialized) { - return; - } - - lock.lock(); - try { - if (this.isInitialized) { - return; - } - - // initial topology is based on connection string - this.initialHostList = - this.connectionUrlParser.getHostsFromConnectionUrl(this.originalUrl, false, - () -> this.hostListProviderService.getHostSpecBuilder()); - if (this.initialHostList == null || this.initialHostList.isEmpty()) { - throw new SQLException(Messages.get("AuroraHostListProvider.parsedListEmpty", - new Object[] {this.originalUrl})); - } - 
this.initialHostSpec = this.initialHostList.get(0); - this.hostListProviderService.setInitialConnectionHostSpec(this.initialHostSpec); - - this.clusterId = UUID.randomUUID().toString(); - this.isPrimaryClusterId = false; - this.refreshRateNano = - TimeUnit.MILLISECONDS.toNanos(CLUSTER_TOPOLOGY_REFRESH_RATE_MS.getInteger(properties)); - - HostSpecBuilder hostSpecBuilder = this.hostListProviderService.getHostSpecBuilder(); - this.clusterInstanceTemplate = - CLUSTER_INSTANCE_HOST_PATTERN.getString(this.properties) == null - ? hostSpecBuilder.host(rdsHelper.getRdsInstanceHostPattern(originalUrl)).build() - : hostSpecBuilder.host(CLUSTER_INSTANCE_HOST_PATTERN.getString(this.properties)).build(); - validateHostPatternSetting(this.clusterInstanceTemplate.getHost()); - - this.rdsUrlType = rdsHelper.identifyRdsType(originalUrl); - - final String clusterIdSetting = CLUSTER_ID.getString(this.properties); - if (!StringUtils.isNullOrEmpty(clusterIdSetting)) { - this.clusterId = clusterIdSetting; - } else if (rdsUrlType == RdsUrlType.RDS_PROXY) { - // Each proxy is associated with a single cluster, so it's safe to use RDS Proxy Url as cluster - // identification - this.clusterId = this.initialHostSpec.getUrl(); - } else if (rdsUrlType.isRds()) { - final ClusterSuggestedResult clusterSuggestedResult = - getSuggestedClusterId(this.initialHostSpec.getUrl()); - if (clusterSuggestedResult != null && !StringUtils.isNullOrEmpty( - clusterSuggestedResult.clusterId)) { - this.clusterId = clusterSuggestedResult.clusterId; - this.isPrimaryClusterId = clusterSuggestedResult.isPrimaryClusterId; - } else { - final String clusterRdsHostUrl = - this.rdsHelper.getRdsClusterHostUrl(this.initialHostSpec.getUrl()); - if (!StringUtils.isNullOrEmpty(clusterRdsHostUrl)) { - this.clusterId = this.clusterInstanceTemplate.isPortSpecified() - ? String.format("%s:%s", clusterRdsHostUrl, this.clusterInstanceTemplate.getPort()) - : clusterRdsHostUrl; - this.isPrimaryClusterId = true; - primaryClusterIdCache.put(this.clusterId, true, this.suggestedClusterIdRefreshRateNano); - } - } - } - - this.isInitialized = true; - } finally { - lock.unlock(); - } - } - - /** - * Get cluster topology. It may require an extra call to database to fetch the latest topology. A - * cached copy of topology is returned if it's not yet outdated (controlled by {@link - * #refreshRateNano}). - * - * @param conn A connection to database to fetch the latest topology, if needed. - * @param forceUpdate If true, it forces a service to ignore cached copy of topology and to fetch - * a fresh one. - * @return a list of hosts that describes cluster topology. A writer is always at position 0. - * Returns an empty list if isn't available or is invalid (doesn't contain a writer). - * @throws SQLException if errors occurred while retrieving the topology. - */ - public FetchTopologyResult getTopology(final Connection conn, final boolean forceUpdate) throws SQLException { - init(); - - final String suggestedPrimaryClusterId = suggestedPrimaryClusterIdCache.get(this.clusterId); - - // Change clusterId by accepting a suggested one - if (!StringUtils.isNullOrEmpty(suggestedPrimaryClusterId) - && !this.clusterId.equals(suggestedPrimaryClusterId)) { - - this.clusterId = suggestedPrimaryClusterId; - this.isPrimaryClusterId = true; - } - - final List cachedHosts = topologyCache.get(this.clusterId); - - // This clusterId is a primary one and is about to create a new entry in the cache. - // When a primary entry is created it needs to be suggested for other (non-primary) entries. 
- // Remember a flag to do suggestion after cache is updated. - final boolean needToSuggest = cachedHosts == null && this.isPrimaryClusterId; - - if (cachedHosts == null || forceUpdate) { - - // need to re-fetch topology - - if (conn == null) { - // can't fetch the latest topology since no connection - // return original hosts parsed from connection string - return new FetchTopologyResult(false, this.initialHostList); - } - - // fetch topology from the DB - final List hosts = queryForTopology(conn); - - if (!Utils.isNullOrEmpty(hosts)) { - topologyCache.put(this.clusterId, hosts, this.refreshRateNano); - if (needToSuggest) { - this.suggestPrimaryCluster(hosts); - } - return new FetchTopologyResult(false, hosts); - } - } - - if (cachedHosts == null) { - return new FetchTopologyResult(false, this.initialHostList); - } else { - // use cached data - return new FetchTopologyResult(true, cachedHosts); - } - } - - private ClusterSuggestedResult getSuggestedClusterId(final String url) { - for (final Entry> entry : topologyCache.getEntries().entrySet()) { - final String key = entry.getKey(); // clusterId - final List hosts = entry.getValue(); - final boolean isPrimaryCluster = primaryClusterIdCache.get(key, false, - this.suggestedClusterIdRefreshRateNano); - if (key.equals(url)) { - return new ClusterSuggestedResult(url, isPrimaryCluster); - } - if (hosts == null) { - continue; - } - for (final HostSpec host : hosts) { - if (host.getUrl().equals(url)) { - LOGGER.finest(() -> Messages.get("AuroraHostListProvider.suggestedClusterId", - new Object[] {key, url})); - return new ClusterSuggestedResult(key, isPrimaryCluster); - } - } - } - return null; - } - - protected void suggestPrimaryCluster(final @NonNull List primaryClusterHosts) { - if (Utils.isNullOrEmpty(primaryClusterHosts)) { - return; - } - - final Set primaryClusterHostUrls = new HashSet<>(); - for (final HostSpec hostSpec : primaryClusterHosts) { - primaryClusterHostUrls.add(hostSpec.getUrl()); - } - - for (final Entry> entry : topologyCache.getEntries().entrySet()) { - final String clusterId = entry.getKey(); - final List clusterHosts = entry.getValue(); - final boolean isPrimaryCluster = primaryClusterIdCache.get(clusterId, false, - this.suggestedClusterIdRefreshRateNano); - final String suggestedPrimaryClusterId = suggestedPrimaryClusterIdCache.get(clusterId); - if (isPrimaryCluster - || !StringUtils.isNullOrEmpty(suggestedPrimaryClusterId) - || Utils.isNullOrEmpty(clusterHosts)) { - continue; - } - - // The entry is non-primary - for (final HostSpec host : clusterHosts) { - if (primaryClusterHostUrls.contains(host.getUrl())) { - // Instance on this cluster matches with one of the instance on primary cluster - // Suggest the primary clusterId to this entry - suggestedPrimaryClusterIdCache.put(clusterId, this.clusterId, - this.suggestedClusterIdRefreshRateNano); - break; - } - } - } - } - - /** - * Obtain a cluster topology from database. - * - * @param conn A connection to database to fetch the latest topology. - * @return a list of {@link HostSpec} objects representing the topology - * @throws SQLException if errors occurred while retrieving the topology. 
- */ - protected List queryForTopology(final Connection conn) throws SQLException { - int networkTimeout = -1; - try { - networkTimeout = conn.getNetworkTimeout(); - // The topology query is not monitored by the EFM plugin, so it needs a socket timeout - if (networkTimeout == 0) { - conn.setNetworkTimeout(networkTimeoutExecutor, defaultTopologyQueryTimeoutMs); - } - } catch (SQLException e) { - LOGGER.warning(() -> Messages.get("AuroraHostListProvider.errorGettingNetworkTimeout", - new Object[] {e.getMessage()})); - } - - try (final Statement stmt = conn.createStatement(); - final ResultSet resultSet = stmt.executeQuery(this.topologyQuery)) { - return processQueryResults(resultSet); - } catch (final SQLSyntaxErrorException e) { - throw new SQLException(Messages.get("AuroraHostListProvider.invalidQuery"), e); - } finally { - if (networkTimeout == 0 && !conn.isClosed()) { - conn.setNetworkTimeout(networkTimeoutExecutor, networkTimeout); - } - } - } - - /** - * Form a list of hosts from the results of the topology query. - * - * @param resultSet The results of the topology query - * @return a list of {@link HostSpec} objects representing - * the topology that was returned by the - * topology query. The list will be empty if the topology query returned an invalid topology - * (no writer instance). - */ - private List processQueryResults(final ResultSet resultSet) throws SQLException { - - final HashMap hostMap = new HashMap<>(); - - // Data is result set is ordered by last updated time so the latest records go last. - // When adding hosts to a map, the newer records replace the older ones. - while (resultSet.next()) { - final HostSpec host = createHost(resultSet); - hostMap.put(host.getHost(), host); - } - - final List hosts = new ArrayList<>(); - final List writers = new ArrayList<>(); - - for (final HostSpec host : hostMap.values()) { - if (host.getRole() != HostRole.WRITER) { - hosts.add(host); - } else { - writers.add(host); - } - } - - int writerCount = writers.size(); - - if (writerCount == 0) { - LOGGER.severe( - () -> Messages.get( - "AuroraHostListProvider.invalidTopology")); - hosts.clear(); - } else if (writerCount == 1) { - hosts.add(writers.get(0)); - } else { - // Take the latest updated writer node as the current writer. All others will be ignored. - List sortedWriters = writers.stream() - .sorted(Comparator.comparing(HostSpec::getLastUpdateTime).reversed()) - .collect(Collectors.toList()); - hosts.add(sortedWriters.get(0)); - } - - return hosts; - } - - /** - * Creates an instance of HostSpec which captures details about a connectable host. - * - * @param resultSet the result set from querying the topology - * @return a {@link HostSpec} instance for a specific instance from the cluster - * @throws SQLException If unable to retrieve the hostName from the result set - */ - private HostSpec createHost(final ResultSet resultSet) throws SQLException { - // According to the topology query the result set - // should contain 4 columns: node ID, 1/0 (writer/reader), CPU utilization, node lag in time. - String hostName = resultSet.getString(1); - final boolean isWriter = resultSet.getBoolean(2); - final float cpuUtilization = resultSet.getFloat(3); - final float nodeLag = resultSet.getFloat(4); - Timestamp lastUpdateTime; - try { - lastUpdateTime = resultSet.getTimestamp(5); - } catch (Exception e) { - lastUpdateTime = Timestamp.from(Instant.now()); - } - - // Calculate weight based on node lag in time and CPU utilization. 
- final long weight = Math.round(nodeLag) * 100L + Math.round(cpuUtilization); - - return createHost(hostName, isWriter, weight, lastUpdateTime); - } - - private HostSpec createHost(String host, final boolean isWriter, final long weight, final Timestamp lastUpdateTime) { - host = host == null ? "?" : host; - final String endpoint = getHostEndpoint(host); - final int port = this.clusterInstanceTemplate.isPortSpecified() - ? this.clusterInstanceTemplate.getPort() - : this.initialHostSpec.getPort(); - - final HostSpec hostSpec = this.hostListProviderService.getHostSpecBuilder() - .host(endpoint) - .port(port) - .role(isWriter ? HostRole.WRITER : HostRole.READER) - .availability(HostAvailability.AVAILABLE) - .weight(weight) - .lastUpdateTime(lastUpdateTime) - .build(); - hostSpec.addAlias(host); - hostSpec.setHostId(host); - return hostSpec; - } - - /** - * Build a host dns endpoint based on host/node name. - * - * @param nodeName A host name. - * @return Host dns endpoint - */ - private String getHostEndpoint(final String nodeName) { - final String host = this.clusterInstanceTemplate.getHost(); - return host.replace("?", nodeName); - } - - /** - * Get cached topology. - * - * @return list of hosts that represents topology. If there's no topology in the cache or the - * cached topology is outdated, it returns null. - */ - public @Nullable List getCachedTopology() { - return topologyCache.get(this.clusterId); - } - - /** - * Clear topology cache for all clusters. - */ - public static void clearAll() { - topologyCache.clear(); - primaryClusterIdCache.clear(); - suggestedPrimaryClusterIdCache.clear(); - } - - /** - * Clear topology cache for the current cluster. - */ - public void clear() { - topologyCache.remove(this.clusterId); - } - - @Override - public List refresh() throws SQLException { - return this.refresh(null); - } - - @Override - public List refresh(final Connection connection) throws SQLException { - init(); - final Connection currentConnection = connection != null - ? connection - : this.hostListProviderService.getCurrentConnection(); - - final FetchTopologyResult results = getTopology(currentConnection, false); - LOGGER.finest(() -> Utils.logTopology(results.hosts)); - - this.hostList = results.hosts; - return Collections.unmodifiableList(hostList); - } - - @Override - public List forceRefresh() throws SQLException { - return this.forceRefresh(null); - } - - @Override - public List forceRefresh(final Connection connection) throws SQLException { - init(); - final Connection currentConnection = connection != null - ? connection - : this.hostListProviderService.getCurrentConnection(); - - final FetchTopologyResult results = getTopology(currentConnection, true); - LOGGER.finest(() -> Utils.logTopology(results.hosts)); - this.hostList = results.hosts; - return Collections.unmodifiableList(this.hostList); - } - - public RdsUrlType getRdsUrlType() throws SQLException { - init(); - return this.rdsUrlType; - } - - private void validateHostPatternSetting(final String hostPattern) { - if (!this.rdsHelper.isDnsPatternValid(hostPattern)) { - // "Invalid value for the 'clusterInstanceHostPattern' configuration setting - the host - // pattern must contain a '?' 
- // character as a placeholder for the DB instance identifiers of the instances in the cluster" - final String message = Messages.get("AuroraHostListProvider.invalidPattern"); - LOGGER.severe(message); - throw new RuntimeException(message); - } - - final RdsUrlType rdsUrlType = this.rdsHelper.identifyRdsType(hostPattern); - if (rdsUrlType == RdsUrlType.RDS_PROXY) { - // "An RDS Proxy url can't be used as the 'clusterInstanceHostPattern' configuration setting." - final String message = - Messages.get("AuroraHostListProvider.clusterInstanceHostPatternNotSupportedForRDSProxy"); - LOGGER.severe(message); - throw new RuntimeException(message); - } - - if (rdsUrlType == RdsUrlType.RDS_CUSTOM_CLUSTER) { - // "An RDS Custom Cluster endpoint can't be used as the 'clusterInstanceHostPattern' - // configuration setting." - final String message = - Messages.get("AuroraHostListProvider.clusterInstanceHostPatternNotSupportedForRdsCustom"); - LOGGER.severe(message); - throw new RuntimeException(message); - } - } - - public static void logCache() { - LOGGER.finest(() -> { - final StringBuilder sb = new StringBuilder(); - final Set>> cacheEntries = topologyCache.getEntries().entrySet(); - - if (cacheEntries.isEmpty()) { - sb.append("Cache is empty."); - return sb.toString(); - } - - for (final Entry> entry : cacheEntries) { - final List hosts = entry.getValue(); - final Boolean isPrimaryCluster = primaryClusterIdCache.get(entry.getKey()); - final String suggestedPrimaryClusterId = suggestedPrimaryClusterIdCache.get(entry.getKey()); - - if (sb.length() > 0) { - sb.append("\n"); - } - sb.append("[").append(entry.getKey()).append("]:\n") - .append("\tisPrimaryCluster: ") - .append(isPrimaryCluster != null && isPrimaryCluster).append("\n") - .append("\tsuggestedPrimaryCluster: ") - .append(suggestedPrimaryClusterId).append("\n") - .append("\tHosts: "); - - if (hosts == null) { - sb.append(""); - } else { - for (final HostSpec h : hosts) { - sb.append("\n\t").append(h); - } - } - } - return sb.toString(); - }); - } - - static class FetchTopologyResult { - - public List hosts; - public boolean isCachedData; - - public FetchTopologyResult(final boolean isCachedData, final List hosts) { - this.isCachedData = isCachedData; - this.hosts = hosts; - } - } - - static class ClusterSuggestedResult { - - public String clusterId; - public boolean isPrimaryClusterId; - - public ClusterSuggestedResult(final String clusterId, final boolean isPrimaryClusterId) { - this.clusterId = clusterId; - this.isPrimaryClusterId = isPrimaryClusterId; - } - } - - @Override - public HostRole getHostRole(Connection conn) throws SQLException { - try (final Statement stmt = conn.createStatement(); - final ResultSet rs = stmt.executeQuery(this.isReaderQuery)) { - if (rs.next()) { - boolean isReader = rs.getBoolean(1); - return isReader ? 
HostRole.READER : HostRole.WRITER; - } - } catch (SQLException e) { - throw new SQLException(Messages.get("AuroraHostListProvider.errorGettingHostRole"), e); - } - - throw new SQLException(Messages.get("AuroraHostListProvider.errorGettingHostRole")); - } - - @Override - public HostSpec identifyConnection(Connection connection) throws SQLException { - try (final Statement stmt = connection.createStatement(); - final ResultSet resultSet = stmt.executeQuery(this.nodeIdQuery)) { - if (resultSet.next()) { - final String instanceName = resultSet.getString(1); - - final List topology = this.refresh(); - - if (topology == null) { - return null; - } - - return topology - .stream() - .filter(host -> Objects.equals(instanceName, host.getHostId())) - .findAny() - .orElse(null); - } - } catch (final SQLException e) { - throw new SQLException(Messages.get("AuroraHostListProvider.errorIdentifyConnection"), e); - } - - throw new SQLException(Messages.get("AuroraHostListProvider.errorIdentifyConnection")); + isReaderQuery); } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsHostListProvider.java new file mode 100644 index 000000000..af6a4bdf6 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsHostListProvider.java @@ -0,0 +1,669 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package software.amazon.jdbc.hostlistprovider; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLSyntaxErrorException; +import java.sql.Statement; +import java.sql.Timestamp; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.Properties; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.ReentrantLock; +import java.util.logging.Logger; +import java.util.stream.Collectors; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; +import software.amazon.jdbc.AwsWrapperProperty; +import software.amazon.jdbc.HostListProviderService; +import software.amazon.jdbc.HostRole; +import software.amazon.jdbc.HostSpec; +import software.amazon.jdbc.HostSpecBuilder; +import software.amazon.jdbc.PropertyDefinition; +import software.amazon.jdbc.hostavailability.HostAvailability; +import software.amazon.jdbc.util.CacheMap; +import software.amazon.jdbc.util.ConnectionUrlParser; +import software.amazon.jdbc.util.Messages; +import software.amazon.jdbc.util.RdsUrlType; +import software.amazon.jdbc.util.RdsUtils; +import software.amazon.jdbc.util.StringUtils; +import software.amazon.jdbc.util.SynchronousExecutor; +import software.amazon.jdbc.util.Utils; + +public class RdsHostListProvider implements DynamicHostListProvider { + + public static final AwsWrapperProperty CLUSTER_TOPOLOGY_REFRESH_RATE_MS = + new AwsWrapperProperty( + "clusterTopologyRefreshRateMs", + "30000", + "Cluster topology refresh rate in millis. " + + "The cached topology for the cluster will be invalidated after the specified time, " + + "after which it will be updated during the next interaction with the connection."); + + public static final AwsWrapperProperty CLUSTER_ID = new AwsWrapperProperty( + "clusterId", "", + "A unique identifier for the cluster. " + + "Connections with the same cluster id share a cluster topology cache. " + + "If unspecified, a cluster id is automatically created for AWS RDS clusters."); + + public static final AwsWrapperProperty CLUSTER_INSTANCE_HOST_PATTERN = + new AwsWrapperProperty( + "clusterInstanceHostPattern", + null, + "The cluster instance DNS pattern that will be used to build a complete instance endpoint. " + + "A \"?\" character in this pattern should be used as a placeholder for cluster instance names. " + + "This pattern is required to be specified for IP address or custom domain connections to AWS RDS " + + "clusters. Otherwise, if unspecified, the pattern will be automatically created for AWS RDS clusters."); + + final Executor networkTimeoutExecutor = new SynchronousExecutor(); + final HostListProviderService hostListProviderService; + private final String originalUrl; + final String topologyQuery; + final String nodeIdQuery; + private final String isReaderQuery; + private RdsUrlType rdsUrlType; + private final RdsUtils rdsHelper; + + private long refreshRateNano = CLUSTER_TOPOLOGY_REFRESH_RATE_MS.defaultValue != null + ? 
TimeUnit.MILLISECONDS.toNanos(Long.parseLong(CLUSTER_TOPOLOGY_REFRESH_RATE_MS.defaultValue)) + : TimeUnit.MILLISECONDS.toNanos(30000); + private final long suggestedClusterIdRefreshRateNano = TimeUnit.MINUTES.toNanos(10); + private List<HostSpec> hostList = new ArrayList<>(); + private List<HostSpec> initialHostList = new ArrayList<>(); + private HostSpec initialHostSpec; + + public static final CacheMap<String, List<HostSpec>> topologyCache = new CacheMap<>(); + public static final CacheMap<String, String> suggestedPrimaryClusterIdCache = new CacheMap<>(); + public static final CacheMap<String, Boolean> primaryClusterIdCache = new CacheMap<>(); + + static final int defaultTopologyQueryTimeoutMs = 5000; + private final ReentrantLock lock = new ReentrantLock(); + protected String clusterId; + protected HostSpec clusterInstanceTemplate; + protected ConnectionUrlParser connectionUrlParser; + + // A primary clusterId is a clusterId that is based off of a cluster endpoint URL + // (rather than a GUID or a value provided by the user). + protected boolean isPrimaryClusterId; + + protected boolean isInitialized = false; + + static final Logger LOGGER = Logger.getLogger(RdsHostListProvider.class.getName()); + + Properties properties; + + static { + PropertyDefinition.registerPluginProperties(RdsHostListProvider.class); + } + + public RdsHostListProvider( + final Properties properties, + final String originalUrl, + final HostListProviderService hostListProviderService, + final String topologyQuery, + final String nodeIdQuery, + final String isReaderQuery) { + this.rdsHelper = new RdsUtils(); + this.hostListProviderService = hostListProviderService; + this.properties = properties; + this.originalUrl = originalUrl; + this.topologyQuery = topologyQuery; + this.nodeIdQuery = nodeIdQuery; + this.isReaderQuery = isReaderQuery; + this.connectionUrlParser = new ConnectionUrlParser(); + } + + protected void init() throws SQLException { + if (this.isInitialized) { + return; + } + + lock.lock(); + try { + if (this.isInitialized) { + return; + } + + // initial topology is based on connection string + this.initialHostList = + this.connectionUrlParser.getHostsFromConnectionUrl(this.originalUrl, false, + () -> this.hostListProviderService.getHostSpecBuilder()); + if (this.initialHostList == null || this.initialHostList.isEmpty()) { + throw new SQLException(Messages.get("RdsHostListProvider.parsedListEmpty", + new Object[] {this.originalUrl})); + } + this.initialHostSpec = this.initialHostList.get(0); + this.hostListProviderService.setInitialConnectionHostSpec(this.initialHostSpec); + + this.clusterId = UUID.randomUUID().toString(); + this.isPrimaryClusterId = false; + this.refreshRateNano = + TimeUnit.MILLISECONDS.toNanos(CLUSTER_TOPOLOGY_REFRESH_RATE_MS.getInteger(properties)); + + HostSpecBuilder hostSpecBuilder = this.hostListProviderService.getHostSpecBuilder(); + this.clusterInstanceTemplate = + CLUSTER_INSTANCE_HOST_PATTERN.getString(this.properties) == null + ?
hostSpecBuilder.host(rdsHelper.getRdsInstanceHostPattern(originalUrl)).build() + : hostSpecBuilder.host(CLUSTER_INSTANCE_HOST_PATTERN.getString(this.properties)).build(); + validateHostPatternSetting(this.clusterInstanceTemplate.getHost()); + + this.rdsUrlType = rdsHelper.identifyRdsType(originalUrl); + + final String clusterIdSetting = CLUSTER_ID.getString(this.properties); + if (!StringUtils.isNullOrEmpty(clusterIdSetting)) { + this.clusterId = clusterIdSetting; + } else if (rdsUrlType == RdsUrlType.RDS_PROXY) { + // Each proxy is associated with a single cluster, so it's safe to use RDS Proxy Url as cluster + // identification + this.clusterId = this.initialHostSpec.getUrl(); + } else if (rdsUrlType.isRds()) { + final ClusterSuggestedResult clusterSuggestedResult = + getSuggestedClusterId(this.initialHostSpec.getUrl()); + if (clusterSuggestedResult != null && !StringUtils.isNullOrEmpty( + clusterSuggestedResult.clusterId)) { + this.clusterId = clusterSuggestedResult.clusterId; + this.isPrimaryClusterId = clusterSuggestedResult.isPrimaryClusterId; + } else { + final String clusterRdsHostUrl = + this.rdsHelper.getRdsClusterHostUrl(this.initialHostSpec.getUrl()); + if (!StringUtils.isNullOrEmpty(clusterRdsHostUrl)) { + this.clusterId = this.clusterInstanceTemplate.isPortSpecified() + ? String.format("%s:%s", clusterRdsHostUrl, this.clusterInstanceTemplate.getPort()) + : clusterRdsHostUrl; + this.isPrimaryClusterId = true; + primaryClusterIdCache.put(this.clusterId, true, this.suggestedClusterIdRefreshRateNano); + } + } + } + + this.isInitialized = true; + } finally { + lock.unlock(); + } + } + + /** + * Get cluster topology. It may require an extra call to database to fetch the latest topology. A + * cached copy of topology is returned if it's not yet outdated (controlled by {@link + * #refreshRateNano}). + * + * @param conn A connection to database to fetch the latest topology, if needed. + * @param forceUpdate If true, it forces a service to ignore cached copy of topology and to fetch + * a fresh one. + * @return a list of hosts that describes cluster topology. A writer is always at position 0. + * Returns an empty list if the topology isn't available or is invalid (doesn't contain a writer). + * @throws SQLException if errors occurred while retrieving the topology. + */ + public FetchTopologyResult getTopology(final Connection conn, final boolean forceUpdate) throws SQLException { + init(); + + final String suggestedPrimaryClusterId = suggestedPrimaryClusterIdCache.get(this.clusterId); + + // Change clusterId by accepting a suggested one + if (!StringUtils.isNullOrEmpty(suggestedPrimaryClusterId) + && !this.clusterId.equals(suggestedPrimaryClusterId)) { + + this.clusterId = suggestedPrimaryClusterId; + this.isPrimaryClusterId = true; + } + + final List<HostSpec> cachedHosts = topologyCache.get(this.clusterId); + + // This clusterId is a primary one and is about to create a new entry in the cache. + // When a primary entry is created it needs to be suggested for other (non-primary) entries. + // Remember a flag to do suggestion after cache is updated.
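The branching that follows yields three possible outcomes. A condensed sketch, assuming `provider` is an initialized instance of this class and `conn` is a live connection (both hypothetical):

```java
// Illustrative only; FetchTopologyResult is the package-level result holder defined below.
FetchTopologyResult fresh = provider.getTopology(conn, false);   // unexpired cache: cached hosts
FetchTopologyResult forced = provider.getTopology(conn, true);   // forceUpdate: re-query the database
FetchTopologyResult fallback = provider.getTopology(null, true); // no connection: initial host list
```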
+ final boolean needToSuggest = cachedHosts == null && this.isPrimaryClusterId; + + if (cachedHosts == null || forceUpdate) { + + // need to re-fetch topology + + if (conn == null) { + // can't fetch the latest topology since no connection + // return original hosts parsed from connection string + return new FetchTopologyResult(false, this.initialHostList); + } + + // fetch topology from the DB + final List<HostSpec> hosts = queryForTopology(conn); + + if (!Utils.isNullOrEmpty(hosts)) { + topologyCache.put(this.clusterId, hosts, this.refreshRateNano); + if (needToSuggest) { + this.suggestPrimaryCluster(hosts); + } + return new FetchTopologyResult(false, hosts); + } + } + + if (cachedHosts == null) { + return new FetchTopologyResult(false, this.initialHostList); + } else { + // use cached data + return new FetchTopologyResult(true, cachedHosts); + } + } + + private ClusterSuggestedResult getSuggestedClusterId(final String url) { + for (final Entry<String, List<HostSpec>> entry : topologyCache.getEntries().entrySet()) { + final String key = entry.getKey(); // clusterId + final List<HostSpec> hosts = entry.getValue(); + final boolean isPrimaryCluster = primaryClusterIdCache.get(key, false, + this.suggestedClusterIdRefreshRateNano); + if (key.equals(url)) { + return new ClusterSuggestedResult(url, isPrimaryCluster); + } + if (hosts == null) { + continue; + } + for (final HostSpec host : hosts) { + if (host.getUrl().equals(url)) { + LOGGER.finest(() -> Messages.get("RdsHostListProvider.suggestedClusterId", + new Object[] {key, url})); + return new ClusterSuggestedResult(key, isPrimaryCluster); + } + } + } + return null; + } + + protected void suggestPrimaryCluster(final @NonNull List<HostSpec> primaryClusterHosts) { + if (Utils.isNullOrEmpty(primaryClusterHosts)) { + return; + } + + final Set<String> primaryClusterHostUrls = new HashSet<>(); + for (final HostSpec hostSpec : primaryClusterHosts) { + primaryClusterHostUrls.add(hostSpec.getUrl()); + } + + for (final Entry<String, List<HostSpec>> entry : topologyCache.getEntries().entrySet()) { + final String clusterId = entry.getKey(); + final List<HostSpec> clusterHosts = entry.getValue(); + final boolean isPrimaryCluster = primaryClusterIdCache.get(clusterId, false, + this.suggestedClusterIdRefreshRateNano); + final String suggestedPrimaryClusterId = suggestedPrimaryClusterIdCache.get(clusterId); + if (isPrimaryCluster + || !StringUtils.isNullOrEmpty(suggestedPrimaryClusterId) + || Utils.isNullOrEmpty(clusterHosts)) { + continue; + } + + // The entry is non-primary + for (final HostSpec host : clusterHosts) { + if (primaryClusterHostUrls.contains(host.getUrl())) { + // An instance in this cluster matches one of the instances in the primary cluster. + // Suggest the primary clusterId to this entry + suggestedPrimaryClusterIdCache.put(clusterId, this.clusterId, + this.suggestedClusterIdRefreshRateNano); + break; + } + } + } + } + + /** + * Obtain a cluster topology from database. + * + * @param conn A connection to database to fetch the latest topology. + * @return a list of {@link HostSpec} objects representing the topology + * @throws SQLException if errors occurred while retrieving the topology.
+ */ + protected List<HostSpec> queryForTopology(final Connection conn) throws SQLException { + int networkTimeout = -1; + try { + networkTimeout = conn.getNetworkTimeout(); + // The topology query is not monitored by the EFM plugin, so it needs a socket timeout + if (networkTimeout == 0) { + conn.setNetworkTimeout(networkTimeoutExecutor, defaultTopologyQueryTimeoutMs); + } + } catch (SQLException e) { + LOGGER.warning(() -> Messages.get("RdsHostListProvider.errorGettingNetworkTimeout", + new Object[] {e.getMessage()})); + } + + try (final Statement stmt = conn.createStatement(); + final ResultSet resultSet = stmt.executeQuery(this.topologyQuery)) { + return processQueryResults(resultSet); + } catch (final SQLSyntaxErrorException e) { + throw new SQLException(Messages.get("RdsHostListProvider.invalidQuery"), e); + } finally { + if (networkTimeout == 0 && !conn.isClosed()) { + conn.setNetworkTimeout(networkTimeoutExecutor, networkTimeout); + } + } + } + + /** + * Form a list of hosts from the results of the topology query. + * + * @param resultSet The results of the topology query + * @return a list of {@link HostSpec} objects representing + * the topology that was returned by the + * topology query. The list will be empty if the topology query returned an invalid topology + * (no writer instance). + */ + private List<HostSpec> processQueryResults(final ResultSet resultSet) throws SQLException { + + final HashMap<String, HostSpec> hostMap = new HashMap<>(); + + // Data in the result set is ordered by last updated time so the latest records go last. + // When adding hosts to a map, the newer records replace the older ones. + while (resultSet.next()) { + final HostSpec host = createHost(resultSet); + hostMap.put(host.getHost(), host); + } + + final List<HostSpec> hosts = new ArrayList<>(); + final List<HostSpec> writers = new ArrayList<>(); + + for (final HostSpec host : hostMap.values()) { + if (host.getRole() != HostRole.WRITER) { + hosts.add(host); + } else { + writers.add(host); + } + } + + int writerCount = writers.size(); + + if (writerCount == 0) { + LOGGER.severe( + () -> Messages.get( + "RdsHostListProvider.invalidTopology")); + hosts.clear(); + } else if (writerCount == 1) { + hosts.add(writers.get(0)); + } else { + // Take the latest updated writer node as the current writer. All others will be ignored. + List<HostSpec> sortedWriters = writers.stream() + .sorted(Comparator.comparing(HostSpec::getLastUpdateTime).reversed()) + .collect(Collectors.toList()); + hosts.add(sortedWriters.get(0)); + } + + return hosts; + } + + /** + * Creates an instance of HostSpec which captures details about a connectable host. + * + * @param resultSet the result set from querying the topology + * @return a {@link HostSpec} instance for a specific instance from the cluster + * @throws SQLException If unable to retrieve the hostName from the result set + */ + private HostSpec createHost(final ResultSet resultSet) throws SQLException { + // According to the topology query the result set + // should contain 4 columns: node ID, 1/0 (writer/reader), CPU utilization, node lag in time. + String hostName = resultSet.getString(1); + final boolean isWriter = resultSet.getBoolean(2); + final float cpuUtilization = resultSet.getFloat(3); + final float nodeLag = resultSet.getFloat(4); + Timestamp lastUpdateTime; + try { + lastUpdateTime = resultSet.getTimestamp(5); + } catch (Exception e) { + lastUpdateTime = Timestamp.from(Instant.now()); + } + + // Calculate weight based on node lag in time and CPU utilization.
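+ // For example (hypothetical readings): nodeLag = 2.3 s and cpuUtilization = 75.4 % give + // weight = Math.round(2.3f) * 100L + Math.round(75.4f) = 2 * 100 + 75 = 275, so replica lag + // dominates the score while CPU load separates similarly lagged nodes.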
+ final long weight = Math.round(nodeLag) * 100L + Math.round(cpuUtilization); + + return createHost(hostName, isWriter, weight, lastUpdateTime); + } + + private HostSpec createHost(String host, final boolean isWriter, final long weight, final Timestamp lastUpdateTime) { + host = host == null ? "?" : host; + final String endpoint = getHostEndpoint(host); + final int port = this.clusterInstanceTemplate.isPortSpecified() + ? this.clusterInstanceTemplate.getPort() + : this.initialHostSpec.getPort(); + + final HostSpec hostSpec = this.hostListProviderService.getHostSpecBuilder() + .host(endpoint) + .port(port) + .role(isWriter ? HostRole.WRITER : HostRole.READER) + .availability(HostAvailability.AVAILABLE) + .weight(weight) + .lastUpdateTime(lastUpdateTime) + .build(); + hostSpec.addAlias(host); + hostSpec.setHostId(host); + return hostSpec; + } + + /** + * Build a host DNS endpoint based on host/node name. + * + * @param nodeName A host name. + * @return Host DNS endpoint + */ + private String getHostEndpoint(final String nodeName) { + final String host = this.clusterInstanceTemplate.getHost(); + return host.replace("?", nodeName); + } + + /** + * Get cached topology. + * + * @return list of hosts that represents the topology. If there's no topology in the cache or the + * cached topology is outdated, it returns null. + */ + public @Nullable List<HostSpec> getCachedTopology() { + return topologyCache.get(this.clusterId); + } + + /** + * Clear topology cache for all clusters. + */ + public static void clearAll() { + topologyCache.clear(); + primaryClusterIdCache.clear(); + suggestedPrimaryClusterIdCache.clear(); + } + + /** + * Clear topology cache for the current cluster. + */ + public void clear() { + topologyCache.remove(this.clusterId); + } + + @Override + public List<HostSpec> refresh() throws SQLException { + return this.refresh(null); + } + + @Override + public List<HostSpec> refresh(final Connection connection) throws SQLException { + init(); + final Connection currentConnection = connection != null + ? connection + : this.hostListProviderService.getCurrentConnection(); + + final FetchTopologyResult results = getTopology(currentConnection, false); + LOGGER.finest(() -> Utils.logTopology(results.hosts)); + + this.hostList = results.hosts; + return Collections.unmodifiableList(hostList); + } + + @Override + public List<HostSpec> forceRefresh() throws SQLException { + return this.forceRefresh(null); + } + + @Override + public List<HostSpec> forceRefresh(final Connection connection) throws SQLException { + init(); + final Connection currentConnection = connection != null + ? connection + : this.hostListProviderService.getCurrentConnection(); + + final FetchTopologyResult results = getTopology(currentConnection, true); + LOGGER.finest(() -> Utils.logTopology(results.hosts)); + this.hostList = results.hosts; + return Collections.unmodifiableList(this.hostList); + } + + public RdsUrlType getRdsUrlType() throws SQLException { + init(); + return this.rdsUrlType; + } + + private void validateHostPatternSetting(final String hostPattern) { + if (!this.rdsHelper.isDnsPatternValid(hostPattern)) { + // "Invalid value for the 'clusterInstanceHostPattern' configuration setting - the host + // pattern must contain a '?'
+ // character as a placeholder for the DB instance identifiers of the instances in the cluster" + final String message = Messages.get("RdsHostListProvider.invalidPattern"); + LOGGER.severe(message); + throw new RuntimeException(message); + } + + final RdsUrlType rdsUrlType = this.rdsHelper.identifyRdsType(hostPattern); + if (rdsUrlType == RdsUrlType.RDS_PROXY) { + // "An RDS Proxy url can't be used as the 'clusterInstanceHostPattern' configuration setting." + final String message = + Messages.get("RdsHostListProvider.clusterInstanceHostPatternNotSupportedForRDSProxy"); + LOGGER.severe(message); + throw new RuntimeException(message); + } + + if (rdsUrlType == RdsUrlType.RDS_CUSTOM_CLUSTER) { + // "An RDS Custom Cluster endpoint can't be used as the 'clusterInstanceHostPattern' + // configuration setting." + final String message = + Messages.get("RdsHostListProvider.clusterInstanceHostPatternNotSupportedForRdsCustom"); + LOGGER.severe(message); + throw new RuntimeException(message); + } + } + + public static void logCache() { + LOGGER.finest(() -> { + final StringBuilder sb = new StringBuilder(); + final Set<Entry<String, List<HostSpec>>> cacheEntries = topologyCache.getEntries().entrySet(); + + if (cacheEntries.isEmpty()) { + sb.append("Cache is empty."); + return sb.toString(); + } + + for (final Entry<String, List<HostSpec>> entry : cacheEntries) { + final List<HostSpec> hosts = entry.getValue(); + final Boolean isPrimaryCluster = primaryClusterIdCache.get(entry.getKey()); + final String suggestedPrimaryClusterId = suggestedPrimaryClusterIdCache.get(entry.getKey()); + + if (sb.length() > 0) { + sb.append("\n"); + } + sb.append("[").append(entry.getKey()).append("]:\n") + .append("\tisPrimaryCluster: ") + .append(isPrimaryCluster != null && isPrimaryCluster).append("\n") + .append("\tsuggestedPrimaryCluster: ") + .append(suggestedPrimaryClusterId).append("\n") + .append("\tHosts: "); + + if (hosts == null) { + sb.append("<null>"); + } else { + for (final HostSpec h : hosts) { + sb.append("\n\t").append(h); + } + } + } + return sb.toString(); + }); + } + + static class FetchTopologyResult { + + public List<HostSpec> hosts; + public boolean isCachedData; + + public FetchTopologyResult(final boolean isCachedData, final List<HostSpec> hosts) { + this.isCachedData = isCachedData; + this.hosts = hosts; + } + } + + static class ClusterSuggestedResult { + + public String clusterId; + public boolean isPrimaryClusterId; + + public ClusterSuggestedResult(final String clusterId, final boolean isPrimaryClusterId) { + this.clusterId = clusterId; + this.isPrimaryClusterId = isPrimaryClusterId; + } + } + + @Override + public HostRole getHostRole(Connection conn) throws SQLException { + try (final Statement stmt = conn.createStatement(); + final ResultSet rs = stmt.executeQuery(this.isReaderQuery)) { + if (rs.next()) { + boolean isReader = rs.getBoolean(1); + return isReader ?
HostRole.READER : HostRole.WRITER; + } + } catch (SQLException e) { + throw new SQLException(Messages.get("RdsHostListProvider.errorGettingHostRole"), e); + } + + throw new SQLException(Messages.get("RdsHostListProvider.errorGettingHostRole")); + } + + @Override + public HostSpec identifyConnection(Connection connection) throws SQLException { + try (final Statement stmt = connection.createStatement(); + final ResultSet resultSet = stmt.executeQuery(this.nodeIdQuery)) { + if (resultSet.next()) { + final String instanceName = resultSet.getString(1); + + final List<HostSpec> topology = this.refresh(); + + if (topology == null) { + return null; + } + + return topology + .stream() + .filter(host -> Objects.equals(instanceName, host.getHostId())) + .findAny() + .orElse(null); + } + } catch (final SQLException e) { + throw new SQLException(Messages.get("RdsHostListProvider.errorIdentifyConnection"), e); + } + + throw new SQLException(Messages.get("RdsHostListProvider.errorIdentifyConnection")); + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java index 48989d73c..a366119c0 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java @@ -197,7 +197,7 @@ public HostSpec getHostSpecByStrategy(HostRole role, String strategy) throw new SQLException(Messages.get("DefaultConnectionPlugin.noHostsAvailable")); } - return this.connProviderManager.getHostSpecByStrategy(hosts, role, strategy); + return this.connProviderManager.getHostSpecByStrategy(hosts, role, strategy, this.pluginService.getProperties()); } @Override diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/MonitorImpl.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/MonitorImpl.java index b96dd6384..edf4990db 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/MonitorImpl.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/MonitorImpl.java @@ -202,7 +202,7 @@ public void run() { } else { delayMillis -= status.elapsedTimeNano; // Check for min delay between node health check - if (delayMillis < MIN_CONNECTION_CHECK_TIMEOUT_MILLIS) { + if (delayMillis <= 0) { delayMillis = MIN_CONNECTION_CHECK_TIMEOUT_MILLIS; } // Use this delay as node checkout timeout since it corresponds to min interval for all active contexts diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/MonitorThreadContainer.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/MonitorThreadContainer.java index ef7730e43..aa526e88d 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/MonitorThreadContainer.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/MonitorThreadContainer.java @@ -54,19 +54,19 @@ public static MonitorThreadContainer getInstance() { } static MonitorThreadContainer getInstance(final ExecutorServiceInitializer executorServiceInitializer) { - if (singleton == null) { - LOCK_OBJECT.lock(); - try { - if (singleton == null) { - singleton = new MonitorThreadContainer(executorServiceInitializer); - CLASS_USAGE_COUNT.set(0); - } - } finally { - LOCK_OBJECT.unlock(); + MonitorThreadContainer singletonToReturn; + LOCK_OBJECT.lock(); + try { + if (singleton == null) { + singleton = new MonitorThreadContainer(executorServiceInitializer); + CLASS_USAGE_COUNT.set(0); } + singletonToReturn = singleton; + CLASS_USAGE_COUNT.getAndIncrement(); + }
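/* Taking the lock unconditionally, rather than double-checked locking, closes the race with releaseInstance(): a thread can no longer observe a non-null singleton, be preempted while another thread releases it, and then hand back a dead container; the usage count is now incremented under the same lock that guards release. */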
finally { + LOCK_OBJECT.unlock(); } - CLASS_USAGE_COUNT.getAndIncrement(); - return singleton; + return singletonToReturn; } /** @@ -77,18 +77,15 @@ public static void releaseInstance() { if (singleton == null) { return; } - - if (CLASS_USAGE_COUNT.decrementAndGet() <= 0) { - LOCK_OBJECT.lock(); - try { - if (singleton != null) { - singleton.releaseResources(); - singleton = null; - CLASS_USAGE_COUNT.set(0); - } - } finally { - LOCK_OBJECT.unlock(); + LOCK_OBJECT.lock(); + try { + if (singleton != null && CLASS_USAGE_COUNT.decrementAndGet() <= 0) { + singleton.releaseResources(); + singleton = null; + CLASS_USAGE_COUNT.set(0); } + } finally { + LOCK_OBJECT.unlock(); } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/FailoverConnectionPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/FailoverConnectionPlugin.java index ecb0e7a1a..85e665bbe 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/FailoverConnectionPlugin.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/FailoverConnectionPlugin.java @@ -70,6 +70,8 @@ public class FailoverConnectionPlugin extends AbstractConnectionPlugin { } }); + private static final String METHOD_SET_READ_ONLY = "Connection.setReadOnly"; + private static final String METHOD_SET_AUTO_COMMIT = "Connection.setAutoCommit"; private static final String METHOD_GET_AUTO_COMMIT = "Connection.getAutoCommit"; private static final String METHOD_GET_CATALOG = "Connection.getCatalog"; private static final String METHOD_GET_SCHEMA = "Connection.getSchema"; @@ -84,6 +86,7 @@ public class FailoverConnectionPlugin extends AbstractConnectionPlugin { protected int failoverClusterTopologyRefreshRateMsSetting; protected int failoverWriterReconnectIntervalMsSetting; protected int failoverReaderConnectTimeoutMsSetting; + protected boolean keepSessionStateOnFailover; protected FailoverMode failoverMode; private boolean closedExplicitly = false; protected boolean isClosed = false; @@ -97,6 +100,8 @@ public class FailoverConnectionPlugin extends AbstractConnectionPlugin { private RdsUrlType rdsUrlType; private HostListProviderService hostListProviderService; private final AuroraStaleDnsHelper staleDnsHelper; + private Boolean savedReadOnlyStatus; + private Boolean savedAutoCommitStatus; public static final AwsWrapperProperty FAILOVER_CLUSTER_TOPOLOGY_REFRESH_RATE_MS = new AwsWrapperProperty( @@ -136,6 +141,11 @@ public class FailoverConnectionPlugin extends AbstractConnectionPlugin { "failoverMode", null, "Set node role to follow during failover."); + public static final AwsWrapperProperty KEEP_SESSION_STATE_ON_FAILOVER = + new AwsWrapperProperty( + "keepSessionStateOnFailover", "false", + "Allow connections to retain a partial previous session state after failover occurs."); + static { PropertyDefinition.registerPluginProperties(FailoverConnectionPlugin.class); } @@ -187,6 +197,14 @@ public T execute( } } + if (methodName.equals(METHOD_SET_READ_ONLY) && jdbcMethodArgs != null && jdbcMethodArgs.length > 0) { + this.savedReadOnlyStatus = (Boolean) jdbcMethodArgs[0]; + } + + if (methodName.equals(METHOD_SET_AUTO_COMMIT) && jdbcMethodArgs != null && jdbcMethodArgs.length > 0) { + this.savedAutoCommitStatus = (Boolean) jdbcMethodArgs[0]; + } + T result = null; try { @@ -328,10 +346,9 @@ private void initSettings() { this.failoverTimeoutMsSetting = FAILOVER_TIMEOUT_MS.getInteger(this.properties); this.failoverClusterTopologyRefreshRateMsSetting = 
FAILOVER_CLUSTER_TOPOLOGY_REFRESH_RATE_MS.getInteger(this.properties); - this.failoverWriterReconnectIntervalMsSetting = - FAILOVER_WRITER_RECONNECT_INTERVAL_MS.getInteger(this.properties); - this.failoverReaderConnectTimeoutMsSetting = - FAILOVER_READER_CONNECT_TIMEOUT_MS.getInteger(this.properties); + this.failoverWriterReconnectIntervalMsSetting = FAILOVER_WRITER_RECONNECT_INTERVAL_MS.getInteger(this.properties); + this.failoverReaderConnectTimeoutMsSetting = FAILOVER_READER_CONNECT_TIMEOUT_MS.getInteger(this.properties); + this.keepSessionStateOnFailover = KEEP_SESSION_STATE_ON_FAILOVER.getBoolean(this.properties); } private void invalidInvocationOnClosedConnection() throws SQLException { @@ -495,6 +512,27 @@ protected void transferSessionState( to.setTransactionIsolation(from.getTransactionIsolation()); } + /** + * Restores partial session state from saved values to a connection. + * + * @param to The connection to transfer state to + * @throws SQLException if a database access error occurs, this method is called on a closed connection, this + * method is called during a distributed transaction, or this method is called during a + * transaction + */ + protected void restoreSessionState(final Connection to) throws SQLException { + if (to == null) { + return; + } + + if (savedReadOnlyStatus != null) { + to.setReadOnly(savedReadOnlyStatus); + } + if (savedAutoCommitStatus != null) { + to.setAutoCommit(savedAutoCommitStatus); + } + } + private void dealWithOriginalException( final Throwable originalException, final Throwable wrapperException, @@ -599,6 +637,9 @@ protected void failoverReader(final HostSpec failedHostSpec) throws SQLException return; } + if (keepSessionStateOnFailover) { + restoreSessionState(result.getConnection()); + } this.pluginService.setCurrentConnection(result.getConnection(), result.getHost()); this.pluginService.getCurrentHostSpec().removeAlias(oldAliases.toArray(new String[]{})); @@ -627,6 +668,9 @@ protected void failoverWriter() throws SQLException { // successfully re-connected to a writer node final HostSpec writerHostSpec = getWriter(failoverResult.getTopology()); + if (keepSessionStateOnFailover) { + restoreSessionState(failoverResult.getNewConnection()); + } this.pluginService.setCurrentConnection(failoverResult.getNewConnection(), writerHostSpec); LOGGER.fine( @@ -727,6 +771,12 @@ private Connection connectInternal(String driverProtocol, HostSpec hostSpec, Pro this.staleDnsHelper.getVerifiedConnection(isInitialConnection, this.hostListProviderService, driverProtocol, hostSpec, props, connectFunc); + if (this.keepSessionStateOnFailover) { + this.savedReadOnlyStatus = this.savedReadOnlyStatus == null ? conn.isReadOnly() : this.savedReadOnlyStatus; + this.savedAutoCommitStatus = + this.savedAutoCommitStatus == null ? 
conn.getAutoCommit() : this.savedAutoCommitStatus; + } + if (isInitialConnection) { this.pluginService.refreshHostList(conn); } diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPlugin.java index ff4df9f8c..d79dfd776 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPlugin.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPlugin.java @@ -27,6 +27,7 @@ import java.util.logging.Logger; import org.checkerframework.checker.nullness.qual.NonNull; import software.amazon.jdbc.AwsWrapperProperty; +import software.amazon.jdbc.ConnectionProviderManager; import software.amazon.jdbc.HostListProviderService; import software.amazon.jdbc.HostRole; import software.amazon.jdbc.HostSpec; @@ -34,6 +35,7 @@ import software.amazon.jdbc.NodeChangeOptions; import software.amazon.jdbc.OldConnectionSuggestedAction; import software.amazon.jdbc.PluginService; +import software.amazon.jdbc.PooledConnectionProvider; import software.amazon.jdbc.PropertyDefinition; import software.amazon.jdbc.cleanup.CanReleaseResources; import software.amazon.jdbc.plugin.AbstractConnectionPlugin; @@ -62,11 +64,14 @@ public class ReadWriteSplittingPlugin extends AbstractConnectionPlugin private final PluginService pluginService; private final Properties properties; private final String readerSelectorStrategy; + private final ConnectionProviderManager connProviderManager; private volatile boolean inReadWriteSplit = false; private HostListProviderService hostListProviderService; private Connection writerConnection; private Connection readerConnection; private HostSpec readerHostSpec; + private boolean isReaderConnFromInternalPool; + private boolean isWriterConnFromInternalPool; public static final AwsWrapperProperty READER_HOST_SELECTOR_STRATEGY = new AwsWrapperProperty( @@ -82,6 +87,7 @@ public class ReadWriteSplittingPlugin extends AbstractConnectionPlugin this.pluginService = pluginService; this.properties = properties; this.readerSelectorStrategy = READER_HOST_SELECTOR_STRATEGY.getString(properties); + this.connProviderManager = new ConnectionProviderManager(pluginService.getConnectionProvider()); } /** @@ -131,6 +137,7 @@ public Connection connect( Messages.get("ReadWriteSplittingPlugin.unsupportedHostSpecSelectorStrategy", new Object[] { this.readerSelectorStrategy })); } + return connectInternal(isInitialConnection, connectFunc); } @@ -263,6 +270,11 @@ private boolean isReader(final @NonNull HostSpec hostSpec) { private void getNewWriterConnection(final HostSpec writerHostSpec) throws SQLException { final Connection conn = this.pluginService.connect(writerHostSpec, this.properties); + this.isWriterConnFromInternalPool = this.connProviderManager.getConnectionProvider( + this.pluginService.getDriverProtocol(), + writerHostSpec, + this.properties) + instanceof PooledConnectionProvider; setWriterConnection(conn, writerHostSpec); switchCurrentConnectionTo(this.writerConnection, writerHostSpec); } @@ -379,6 +391,10 @@ private synchronized void switchToWriterConnection( switchCurrentConnectionTo(this.writerConnection, writerHost); } + if (this.isReaderConnFromInternalPool) { + this.closeConnectionIfIdle(this.readerConnection); + } + LOGGER.finer(() -> Messages.get("ReadWriteSplittingPlugin.switchedFromReaderToWriter", new Object[] {writerHost.getUrl()})); } @@ -452,6 +468,10 @@ private 
synchronized void switchToReaderConnection(final List<HostSpec> hosts) initializeReaderConnection(hosts); } } + + if (this.isWriterConnFromInternalPool) { + this.closeConnectionIfIdle(this.writerConnection); + } } private void initializeReaderConnection(final @NonNull List<HostSpec> hosts) throws SQLException { @@ -494,6 +514,11 @@ private void getNewReaderConnection() throws SQLException { HostSpec hostSpec = this.pluginService.getHostSpecByStrategy(HostRole.READER, this.readerSelectorStrategy); try { conn = this.pluginService.connect(hostSpec, this.properties); + this.isReaderConnFromInternalPool = this.connProviderManager.getConnectionProvider( + this.pluginService.getDriverProtocol(), + hostSpec, + this.properties) + instanceof PooledConnectionProvider; readerHost = hostSpec; break; } catch (final SQLException e) { @@ -534,7 +559,7 @@ private void closeIdleConnections() { closeConnectionIfIdle(this.writerConnection); } - private void closeConnectionIfIdle(final Connection internalConnection) { + void closeConnectionIfIdle(final Connection internalConnection) { final Connection currentConnection = this.pluginService.getCurrentConnection(); try { if (internalConnection != null diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/Messages.java b/wrapper/src/main/java/software/amazon/jdbc/util/Messages.java index c4f7e85c9..5b3e2e978 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/Messages.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/Messages.java @@ -21,7 +21,7 @@ public class Messages { - private static final ResourceBundle MESSAGES = ResourceBundle.getBundle("messages"); + private static final ResourceBundle MESSAGES = ResourceBundle.getBundle("aws_advanced_jdbc_wrapper_messages"); private static final Object[] emptyArgs = {}; /** diff --git a/wrapper/src/main/resources/messages.properties b/wrapper/src/main/resources/aws_advanced_jdbc_wrapper_messages.properties similarity index 89% rename from wrapper/src/main/resources/messages.properties rename to wrapper/src/main/resources/aws_advanced_jdbc_wrapper_messages.properties index 1c72ecd59..335baad50 100644 --- a/wrapper/src/main/resources/messages.properties +++ b/wrapper/src/main/resources/aws_advanced_jdbc_wrapper_messages.properties @@ -18,16 +18,16 @@ AuroraHostListConnectionPlugin.providerAlreadySet=Another dynamic host list provider has already been set: {0}. # Aurora Host List Provider -AuroraHostListProvider.clusterInstanceHostPatternRequired=The ''clusterInstanceHostPattern'' configuration property is required when an IP address or custom domain is used to connect to a cluster that provides topology information. If you would instead like to connect without failover functionality, set the 'enableClusterAwareFailover' configuration property to false. -AuroraHostListProvider.clusterInstanceHostPatternNotSupportedForRDSProxy=An RDS Proxy url can''t be used as the 'clusterInstanceHostPattern' configuration setting. -AuroraHostListProvider.invalidPattern=Invalid value for the 'clusterInstanceHostPattern' configuration setting - the host pattern must contain a '?' character as a placeholder for the DB instance identifiers of the instances in the cluster. -AuroraHostListProvider.invalidTopology=The topology query returned an invalid topology - no writer instance detected. -AuroraHostListProvider.suggestedClusterId=ClusterId ''{0}'' is suggested for url ''{1}''. -AuroraHostListProvider.parsedListEmpty=Can''t parse connection string: ''{0}'' -AuroraHostListProvider.invalidQuery=Error obtaining host list.
Provided database might not be an Aurora Db cluster -AuroraHostListProvider.errorGettingHostRole=An error occurred while obtaining the connected host's role. This could occur if the connection is broken or if you are not connected to an Aurora database. -AuroraHostListProvider.errorIdentifyConnection=An error occurred while obtaining the connection's host ID. -AuroraHostListProvider.errorGettingNetworkTimeout=An error occurred while getting the connection network timeout: {0} +RdsHostListProvider.clusterInstanceHostPatternRequired=The ''clusterInstanceHostPattern'' configuration property is required when an IP address or custom domain is used to connect to a cluster that provides topology information. If you would instead like to connect without failover functionality, set the 'enableClusterAwareFailover' configuration property to false. +RdsHostListProvider.clusterInstanceHostPatternNotSupportedForRDSProxy=An RDS Proxy url can''t be used as the 'clusterInstanceHostPattern' configuration setting. +RdsHostListProvider.invalidPattern=Invalid value for the 'clusterInstanceHostPattern' configuration setting - the host pattern must contain a '?' character as a placeholder for the DB instance identifiers of the instances in the cluster. +RdsHostListProvider.invalidTopology=The topology query returned an invalid topology - no writer instance detected. +RdsHostListProvider.suggestedClusterId=ClusterId ''{0}'' is suggested for url ''{1}''. +RdsHostListProvider.parsedListEmpty=Can''t parse connection string: ''{0}'' +RdsHostListProvider.invalidQuery=Error obtaining host list. Provided database might not be an Aurora Db cluster +RdsHostListProvider.errorGettingHostRole=An error occurred while obtaining the connected host's role. This could occur if the connection is broken or if you are not connected to an Aurora database. +RdsHostListProvider.errorIdentifyConnection=An error occurred while obtaining the connection's host ID. +RdsHostListProvider.errorGettingNetworkTimeout=An error occurred while getting the connection network timeout: {0} # AWS Credentials Manager AwsCredentialsManager.nullProvider=The configured AwsCredentialsProvider was null. If you have configured the AwsCredentialsManager to use a custom AwsCredentialsProviderHandler, please ensure the handler does not return null. @@ -121,6 +121,9 @@ Driver.missingDriver=Can''t find the target driver for ''{0}''. Please ensure th Driver.notRegistered=Driver is not registered (or it has not been registered using Driver.register() method). Driver.urlParsingFailed=Url [{0}] parsing failed with error: [{1}] +# DataSource +DataSource.failedToSetProperty=Failed to set property ''{0}'' on target datasource ''{1}''. + # Execution Time Connection Plugin ExecutionTimeConnectionPlugin.executionTime=Executed {0} in {1} nanos. @@ -160,6 +163,8 @@ HostMonitoringConnectionPlugin.unableToIdentifyConnection=Unable to identify the # HostSelector HostSelector.noHostsMatchingRole=No hosts were found matching the requested ''{0}'' role. +HostSelector.roundRobinInvalidHostWeightPairs=The provided host weight pairs have not been configured correctly. Please ensure the provided host weight pairs is a comma separated list of pairs, each pair in the format of <host>:<weight>. Weight values must be an integer greater than or equal to the default weight value of 1. +HostSelector.roundRobinInvalidDefaultWeight=The provided default weight value is not valid. Weight values must be an integer greater than or equal to the default weight value of 1.
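+# As an illustration (hypothetical instance names), a well-formed round robin configuration would be +# roundRobinHostWeightPairs=instance-1:1,instance-2:3 with roundRobinDefaultWeight=1, so that +# instance-2 is picked three times per round for every single pick of instance-1.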
# IAM Auth Connection Plugin IamAuthConnectionPlugin.unsupportedHostname=Unsupported AWS hostname {0}. Amazon domain name in format *.AWS-Region.rds.amazonaws.com or *.rds.AWS-Region.amazonaws.com.cn is expected. diff --git a/wrapper/src/test/java/integration/container/tests/AuroraConnectivityTests.java b/wrapper/src/test/java/integration/container/tests/AuroraConnectivityTests.java new file mode 100644 index 000000000..3c3e4a472 --- /dev/null +++ b/wrapper/src/test/java/integration/container/tests/AuroraConnectivityTests.java @@ -0,0 +1,87 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package integration.container.tests; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import integration.DatabaseEngineDeployment; +import integration.DriverHelper; +import integration.TestEnvironmentFeatures; +import integration.container.ConnectionStringHelper; +import integration.container.TestDriver; +import integration.container.TestDriverProvider; +import integration.container.TestEnvironment; +import integration.container.condition.DisableOnTestFeature; +import integration.container.condition.EnableOnDatabaseEngineDeployment; +import integration.container.condition.EnableOnNumOfInstances; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Properties; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.TestMethodOrder; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.api.extension.ExtendWith; +import software.amazon.jdbc.PropertyDefinition; + +@TestMethodOrder(MethodOrderer.MethodName.class) +@ExtendWith(TestDriverProvider.class) +@EnableOnNumOfInstances(min = 2) +@EnableOnDatabaseEngineDeployment(DatabaseEngineDeployment.AURORA) +@DisableOnTestFeature({ + TestEnvironmentFeatures.PERFORMANCE, + TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY, + TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY}) +public class AuroraConnectivityTests { + + private static final Logger LOGGER = Logger.getLogger(AuroraConnectivityTests.class.getName()); + + @TestTemplate + @ExtendWith(TestDriverProvider.class) + public void test_WrapperConnectionReaderClusterWithEfmEnabled(TestDriver testDriver) throws SQLException { + LOGGER.info(testDriver.toString()); + + final Properties props = new Properties(); + props.setProperty( + PropertyDefinition.USER.name, + TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getUsername()); + props.setProperty( + PropertyDefinition.PASSWORD.name, + TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getPassword()); + DriverHelper.setConnectTimeout(testDriver, props, 10, TimeUnit.SECONDS); + DriverHelper.setSocketTimeout(testDriver, props, 10, TimeUnit.SECONDS); + 
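// Only the Enhanced Failure Monitoring ("efm") plugin is enabled here, so this test exercises // reader cluster connectivity with monitoring active but without failover behavior. +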
props.setProperty(PropertyDefinition.PLUGINS.name, "efm"); + + String url = ConnectionStringHelper.getWrapperReaderClusterUrl(); + LOGGER.finest("Connecting to " + url); + + try (final Connection conn = DriverManager.getConnection(url, props)) { + assertTrue(conn.isValid(5)); + + Statement stmt = conn.createStatement(); + stmt.executeQuery("SELECT 1"); + ResultSet rs = stmt.getResultSet(); + rs.next(); + assertEquals(1, rs.getInt(1)); + } + } +} diff --git a/wrapper/src/test/java/integration/container/tests/AuroraFailoverTest.java b/wrapper/src/test/java/integration/container/tests/AuroraFailoverTest.java index d1234f1c8..fc87f85ff 100644 --- a/wrapper/src/test/java/integration/container/tests/AuroraFailoverTest.java +++ b/wrapper/src/test/java/integration/container/tests/AuroraFailoverTest.java @@ -311,6 +311,9 @@ public void test_writerFailWithinTransaction_startTransaction() assertEquals(0, rs.getInt(1)); testStmt3.executeUpdate("DROP TABLE IF EXISTS test3_3"); + + // Assert autocommit is reset to true after failover. + assertTrue(conn.getAutoCommit()); } } @@ -455,6 +458,75 @@ public void test_takeOverConnectionProperties() throws SQLException, Interrupted } } + /** + * Current writer dies, a reader instance is nominated to be a new writer, failover to the new + * writer. Autocommit is set to false and the keepSessionStateOnFailover property is set to true. + */ + @TestTemplate + public void test_failFromWriterWhereKeepSessionStateOnFailoverIsTrue() + throws SQLException, InterruptedException { + + final String initialWriterId = this.currentWriter; + TestInstanceInfo initialWriterInstanceInfo = + TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getInstance(initialWriterId); + + final Properties props = initDefaultProps(); + props.setProperty("keepSessionStateOnFailover", "true"); + + try (final Connection conn = + DriverManager.getConnection( + ConnectionStringHelper.getWrapperUrl( + initialWriterInstanceInfo.getHost(), + initialWriterInstanceInfo.getPort(), + TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getDefaultDbName()), + props)) { + conn.setAutoCommit(false); + + final Statement testStmt1 = conn.createStatement(); + testStmt1.executeUpdate("DROP TABLE IF EXISTS test3_3"); + testStmt1.executeUpdate( + "CREATE TABLE test3_3 (id int not null primary key, test3_3_field varchar(255) not null)"); + conn.setAutoCommit(false); // open a new transaction + conn.commit(); + + final Statement testStmt2 = conn.createStatement(); + testStmt2.executeUpdate("INSERT INTO test3_3 VALUES (1, 'test field string 1')"); + + auroraUtil.failoverClusterAndWaitUntilWriterChanged(); + + // If there is an active transaction, roll it back and return an error with SQLState 08007. + final SQLException exception = + assertThrows( + SQLException.class, + () -> + testStmt2.executeUpdate("INSERT INTO test3_3 VALUES (2, 'test field string 2')")); + assertEquals( + SqlState.CONNECTION_FAILURE_DURING_TRANSACTION.getState(), exception.getSQLState()); + + // Attempt to query the instance id. + final String currentConnectionId = auroraUtil.queryInstanceId(conn); + // Assert that we are connected to the new writer after failover happens. 
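+ // The same Connection object is reused here: the failover plugin has transparently swapped + // its underlying physical connection over to the newly promoted writer instance.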
+ assertTrue(auroraUtil.isDBInstanceWriter(currentConnectionId)); + final String nextClusterWriterId = auroraUtil.getDBClusterWriterInstanceId(); + assertEquals(currentConnectionId, nextClusterWriterId); + assertNotEquals(initialWriterId, nextClusterWriterId); + + // testStmt2 can NOT be used anymore since it's invalid + + final Statement testStmt3 = conn.createStatement(); + final ResultSet rs = testStmt3.executeQuery("SELECT count(*) from test3_3"); + rs.next(); + // Assert that NO row has been inserted into the table. + assertEquals(0, rs.getInt(1)); + + testStmt3.executeUpdate("DROP TABLE IF EXISTS test3_3"); + conn.commit(); + + // Assert autocommit is still false after failover. + assertFalse(conn.getAutoCommit()); + } + } + // Helper methods below protected Properties initDefaultProps() { diff --git a/wrapper/src/test/java/integration/container/tests/AutoscalingTests.java b/wrapper/src/test/java/integration/container/tests/AutoscalingTests.java index 511bde609..ee2c723f2 100644 --- a/wrapper/src/test/java/integration/container/tests/AutoscalingTests.java +++ b/wrapper/src/test/java/integration/container/tests/AutoscalingTests.java @@ -43,7 +43,6 @@ import java.util.List; import java.util.Properties; import java.util.concurrent.TimeUnit; -import java.util.logging.Logger; import org.junit.jupiter.api.MethodOrderer; import org.junit.jupiter.api.TestMethodOrder; import org.junit.jupiter.api.TestTemplate; @@ -65,15 +64,6 @@ public class AutoscalingTests { protected static final AuroraTestUtility auroraUtil = new AuroraTestUtility(TestEnvironment.getCurrent().getInfo().getAuroraRegion()); - private static final Logger LOGGER = Logger.getLogger(AutoscalingTests.class.getName()); - - protected static Properties getProxiedProps() { - final Properties props = getProps(); - AuroraHostListProvider.CLUSTER_INSTANCE_HOST_PATTERN.set(props, - "?."
+ TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo() - .getInstanceEndpointSuffix()); - return props; - } protected static Properties getDefaultPropsNoPlugins() { final Properties props = ConnectionStringHelper.getDefaultProperties(); @@ -84,7 +74,7 @@ protected static Properties getDefaultPropsNoPlugins() { protected static Properties getProps() { final Properties props = getDefaultPropsNoPlugins(); - PropertyDefinition.PLUGINS.set(props, "auroraHostList,readWriteSplitting"); + PropertyDefinition.PLUGINS.set(props, "readWriteSplitting"); return props; } diff --git a/wrapper/src/test/java/integration/container/tests/PerformanceTest.java b/wrapper/src/test/java/integration/container/tests/PerformanceTest.java index e1f7d1347..13e3846d5 100644 --- a/wrapper/src/test/java/integration/container/tests/PerformanceTest.java +++ b/wrapper/src/test/java/integration/container/tests/PerformanceTest.java @@ -54,6 +54,7 @@ import org.junit.jupiter.api.TestTemplate; import org.junit.jupiter.api.extension.ExtendWith; import org.junit.jupiter.params.provider.Arguments; +import software.amazon.jdbc.plugin.failover.FailoverConnectionPlugin; import software.amazon.jdbc.util.StringUtils; @TestMethodOrder(MethodOrderer.MethodName.class) @@ -163,7 +164,7 @@ private void execute_FailureDetectionTime_EnhancedMonitoringEnabled( PLUGINS.set(props, "efm"); final PerfStatMonitoring data = new PerfStatMonitoring(); - doMeasurePerformance(sleepDelayMillis, REPEAT_TIMES, props, false, data); + doMeasurePerformance(sleepDelayMillis, REPEAT_TIMES, props, data); data.paramDetectionTime = detectionTime; data.paramDetectionInterval = detectionInterval; data.paramDetectionCount = detectionCount; @@ -224,9 +225,10 @@ private void execute_FailureDetectionTime_FailoverAndEnhancedMonitoringEnabled( .getInfo() .getProxyDatabaseInfo() .getInstanceEndpointSuffix()); + FailoverConnectionPlugin.FAILOVER_MODE.set(props, "strict-reader"); final PerfStatMonitoring data = new PerfStatMonitoring(); - doMeasurePerformance(sleepDelayMillis, REPEAT_TIMES, props, true, data); + doMeasurePerformance(sleepDelayMillis, REPEAT_TIMES, props, data); data.paramDetectionTime = detectionTime; data.paramDetectionInterval = detectionInterval; data.paramDetectionCount = detectionCount; @@ -279,9 +281,10 @@ private void execute_FailoverTime_SocketTimeout(int socketTimeout, int sleepDela .getProxyDatabaseInfo() .getInstanceEndpointSuffix()); props.setProperty("failoverTimeoutMs", Integer.toString(PERF_FAILOVER_TIMEOUT_MS)); + FailoverConnectionPlugin.FAILOVER_MODE.set(props, "strict-reader"); final PerfStatSocketTimeout data = new PerfStatSocketTimeout(); - doMeasurePerformance(sleepDelayMillis, REPEAT_TIMES, props, true, data); + doMeasurePerformance(sleepDelayMillis, REPEAT_TIMES, props, data); data.paramSocketTimeout = socketTimeout; failoverWithSocketTimeoutPerfDataList.add(data); } @@ -290,7 +293,6 @@ private void doMeasurePerformance( int sleepDelayMillis, int repeatTimes, Properties props, - boolean openReadOnlyConnection, PerfStatBase data) throws SQLException { @@ -324,7 +326,6 @@ private void doMeasurePerformance( try (final Connection conn = openConnectionWithRetry(props); final Statement statement = conn.createStatement()) { - conn.setReadOnly(openReadOnlyConnection); thread.start(); // Execute long query diff --git a/wrapper/src/test/java/integration/container/tests/ReadWriteSplittingTests.java b/wrapper/src/test/java/integration/container/tests/ReadWriteSplittingTests.java index 60d89ac8c..b499b2427 100644 --- 
a/wrapper/src/test/java/integration/container/tests/ReadWriteSplittingTests.java +++ b/wrapper/src/test/java/integration/container/tests/ReadWriteSplittingTests.java @@ -65,7 +65,6 @@ import software.amazon.jdbc.HikariPooledConnectionProvider; import software.amazon.jdbc.PropertyDefinition; import software.amazon.jdbc.hostlistprovider.AuroraHostListProvider; -import software.amazon.jdbc.hostlistprovider.ConnectionStringHostListProvider; import software.amazon.jdbc.plugin.failover.FailoverConnectionPlugin; import software.amazon.jdbc.plugin.failover.FailoverFailedSQLException; import software.amazon.jdbc.plugin.failover.FailoverSuccessSQLException; @@ -114,7 +113,7 @@ protected static Properties getDefaultPropsNoPlugins() { protected static Properties getProps() { final Properties props = getDefaultPropsNoPlugins(); - PropertyDefinition.PLUGINS.set(props, "auroraHostList,readWriteSplitting"); + PropertyDefinition.PLUGINS.set(props, "readWriteSplitting"); return props; } diff --git a/wrapper/src/test/java/integration/util/AuroraTestUtility.java b/wrapper/src/test/java/integration/util/AuroraTestUtility.java index 3e3ffd74e..89f6fd297 100644 --- a/wrapper/src/test/java/integration/util/AuroraTestUtility.java +++ b/wrapper/src/test/java/integration/util/AuroraTestUtility.java @@ -92,7 +92,7 @@ public class AuroraTestUtility { private String dbName = "test"; private String dbIdentifier = "test-identifier"; private String dbEngine = "aurora-postgresql"; - private String dbEngineVersion = "13.7"; + private String dbEngineVersion = "13.9"; private String dbInstanceClass = "db.r5.large"; private final Region dbRegion; private final String dbSecGroup = "default"; diff --git a/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java b/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java index ca94e5b72..1f5279520 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java @@ -187,9 +187,7 @@ public void testLeastConnectionsStrategy() throws SQLException { provider = new HikariPooledConnectionProvider((hostSpec, properties) -> mockConfig); provider.setDatabasePools(getTestPoolMap()); - assertThrows(UnsupportedOperationException.class, () -> - provider.getHostSpecByStrategy(testHosts, HostRole.READER, "random")); - HostSpec selectedHost = provider.getHostSpecByStrategy(testHosts, HostRole.READER, LEAST_CONNECTIONS); + HostSpec selectedHost = provider.getHostSpecByStrategy(testHosts, HostRole.READER, LEAST_CONNECTIONS, defaultProps); // Other reader has 2 connections assertEquals(readerUrl1Connection, selectedHost.getHost()); } diff --git a/wrapper/src/test/java/software/amazon/jdbc/ds/AwsWrapperDataSourceTest.java b/wrapper/src/test/java/software/amazon/jdbc/ds/AwsWrapperDataSourceTest.java index e2046b2ef..a5110543e 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/ds/AwsWrapperDataSourceTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/ds/AwsWrapperDataSourceTest.java @@ -20,6 +20,7 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.spy; import integration.container.TestDriver; import integration.container.condition.DisableOnTestDriver; @@ -33,8 +34,8 @@ import org.mockito.ArgumentCaptor; import org.mockito.Captor; import org.mockito.Mock; -import org.mockito.Mockito; 
import org.mockito.MockitoAnnotations; +import org.postgresql.ds.PGSimpleDataSource; import software.amazon.jdbc.wrapper.ConnectionWrapper; class AwsWrapperDataSourceTest { @@ -49,7 +50,7 @@ class AwsWrapperDataSourceTest { @BeforeEach void setUp() throws SQLException { closeable = MockitoAnnotations.openMocks(this); - ds = Mockito.spy(new AwsWrapperDataSource()); + ds = spy(new AwsWrapperDataSource()); ds.setTargetDataSourceClassName("org.postgresql.ds.PGSimpleDataSource"); doReturn(mockConnection) .when(ds) @@ -225,4 +226,29 @@ public void testConnectionWithUrlMissingPassword() { assertThrows(SQLException.class, () -> ds.getConnection("user", "")); } + + @Test + public void testSetLoginTimeout() throws SQLException { + ds.setLoginTimeout(30); + assertEquals(30, ds.getLoginTimeout()); + assertThrows(SQLException.class, () -> ds.setLoginTimeout(-100)); + } + + @Test + @DisableOnTestDriver(TestDriver.MARIADB) + public void testSetLoginTimeoutOnTargetDataSource() throws SQLException { + PGSimpleDataSource simpleDS = new PGSimpleDataSource(); + doReturn(simpleDS).when(ds).createTargetDataSource(); + + ds.setJdbcUrl("jdbc:postgresql://testserver/"); + + try (final Connection conn = ds.getConnection()) { + assertEquals(0, simpleDS.getLoginTimeout()); + } + + ds.setLoginTimeout(500); + try (final Connection conn = ds.getConnection()) { + assertEquals(500, simpleDS.getLoginTimeout()); + } + } } diff --git a/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/AuroraHostListProviderTest.java b/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsHostListProviderTest.java similarity index 71% rename from wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/AuroraHostListProviderTest.java rename to wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsHostListProviderTest.java index dbb60d4b1..aaef62ac7 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/AuroraHostListProviderTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsHostListProviderTest.java @@ -62,12 +62,12 @@ import software.amazon.jdbc.dialect.Dialect; import software.amazon.jdbc.hostavailability.HostAvailability; import software.amazon.jdbc.hostavailability.SimpleHostAvailabilityStrategy; -import software.amazon.jdbc.hostlistprovider.AuroraHostListProvider.FetchTopologyResult; +import software.amazon.jdbc.hostlistprovider.RdsHostListProvider.FetchTopologyResult; -class AuroraHostListProviderTest { +class RdsHostListProviderTest { private final long defaultRefreshRateNano = TimeUnit.SECONDS.toNanos(5); - private AuroraHostListProvider auroraHostListProvider; + private RdsHostListProvider rdsHostListProvider; @Mock private Connection mockConnection; @Mock private Statement mockStatement; @@ -99,14 +99,14 @@ void setUp() throws SQLException { @AfterEach void tearDown() throws Exception { - AuroraHostListProvider.clearAll(); + RdsHostListProvider.clearAll(); closeable.close(); } - private AuroraHostListProvider getAuroraHostListProvider( + private RdsHostListProvider getRdsHostListProvider( HostListProviderService mockHostListProviderService, String originalUrl) throws SQLException { - AuroraHostListProvider provider = new AuroraHostListProvider( + RdsHostListProvider provider = new RdsHostListProvider( new Properties(), originalUrl, mockHostListProviderService, @@ -118,65 +118,65 @@ private AuroraHostListProvider getAuroraHostListProvider( @Test void testGetTopology_returnCachedTopology() throws SQLException { - auroraHostListProvider = Mockito.spy( - 
getAuroraHostListProvider(mockHostListProviderService, "protocol://url/")); + rdsHostListProvider = Mockito.spy( + getRdsHostListProvider(mockHostListProviderService, "protocol://url/")); final Instant lastUpdated = Instant.now(); final List<HostSpec> expected = hosts; - AuroraHostListProvider.topologyCache.put(auroraHostListProvider.clusterId, expected, defaultRefreshRateNano); + RdsHostListProvider.topologyCache.put(rdsHostListProvider.clusterId, expected, defaultRefreshRateNano); - final FetchTopologyResult result = auroraHostListProvider.getTopology(mockConnection, false); + final FetchTopologyResult result = rdsHostListProvider.getTopology(mockConnection, false); assertEquals(expected, result.hosts); assertEquals(2, result.hosts.size()); - verify(auroraHostListProvider, never()).queryForTopology(mockConnection); + verify(rdsHostListProvider, never()).queryForTopology(mockConnection); } @Test void testGetTopology_withForceUpdate_returnsUpdatedTopology() throws SQLException { - auroraHostListProvider = Mockito.spy( - getAuroraHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url")); - auroraHostListProvider.isInitialized = true; + rdsHostListProvider = Mockito.spy( + getRdsHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url")); + rdsHostListProvider.isInitialized = true; - AuroraHostListProvider.topologyCache.put(auroraHostListProvider.clusterId, hosts, defaultRefreshRateNano); + RdsHostListProvider.topologyCache.put(rdsHostListProvider.clusterId, hosts, defaultRefreshRateNano); final List<HostSpec> newHosts = Collections.singletonList( new HostSpecBuilder(new SimpleHostAvailabilityStrategy()).host("newHost").build()); - doReturn(newHosts).when(auroraHostListProvider).queryForTopology(mockConnection); + doReturn(newHosts).when(rdsHostListProvider).queryForTopology(mockConnection); - final FetchTopologyResult result = auroraHostListProvider.getTopology(mockConnection, true); - verify(auroraHostListProvider, atMostOnce()).queryForTopology(mockConnection); + final FetchTopologyResult result = rdsHostListProvider.getTopology(mockConnection, true); + verify(rdsHostListProvider, atMostOnce()).queryForTopology(mockConnection); assertEquals(1, result.hosts.size()); assertEquals(newHosts, result.hosts); } @Test void testGetTopology_noForceUpdate_queryReturnsEmptyHostList() throws SQLException { - auroraHostListProvider = Mockito.spy( - getAuroraHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url")); - auroraHostListProvider.clusterId = "cluster-id"; - auroraHostListProvider.isInitialized = true; + rdsHostListProvider = Mockito.spy( + getRdsHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url")); + rdsHostListProvider.clusterId = "cluster-id"; + rdsHostListProvider.isInitialized = true; final List<HostSpec> expected = hosts; - AuroraHostListProvider.topologyCache.put(auroraHostListProvider.clusterId, expected, defaultRefreshRateNano); + RdsHostListProvider.topologyCache.put(rdsHostListProvider.clusterId, expected, defaultRefreshRateNano); - doReturn(new ArrayList<>()).when(auroraHostListProvider).queryForTopology(mockConnection); + doReturn(new ArrayList<>()).when(rdsHostListProvider).queryForTopology(mockConnection); - final FetchTopologyResult result = auroraHostListProvider.getTopology(mockConnection, false); - verify(auroraHostListProvider, atMostOnce()).queryForTopology(mockConnection); + final FetchTopologyResult result = rdsHostListProvider.getTopology(mockConnection, false); + verify(rdsHostListProvider,
atMostOnce()).queryForTopology(mockConnection); assertEquals(2, result.hosts.size()); assertEquals(expected, result.hosts); } @Test void testGetTopology_withForceUpdate_returnsInitialHostList() throws SQLException { - auroraHostListProvider = Mockito.spy( - getAuroraHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url")); - auroraHostListProvider.clear(); + rdsHostListProvider = Mockito.spy( + getRdsHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url")); + rdsHostListProvider.clear(); - doReturn(new ArrayList<>()).when(auroraHostListProvider).queryForTopology(mockConnection); + doReturn(new ArrayList<>()).when(rdsHostListProvider).queryForTopology(mockConnection); - final FetchTopologyResult result = auroraHostListProvider.getTopology(mockConnection, true); - verify(auroraHostListProvider, atMostOnce()).queryForTopology(mockConnection); + final FetchTopologyResult result = rdsHostListProvider.getTopology(mockConnection, true); + verify(rdsHostListProvider, atMostOnce()).queryForTopology(mockConnection); assertNotNull(result.hosts); assertEquals( Arrays.asList(new HostSpecBuilder(new SimpleHostAvailabilityStrategy()).host("url").build()), @@ -196,65 +196,65 @@ void testQueryForTopology_withDifferentDriverProtocol() throws SQLException { when(mockResultSet.getString(eq(1))).thenReturn("mysql"); - auroraHostListProvider = - getAuroraHostListProvider(mockHostListProviderService, "mysql://url/"); + rdsHostListProvider = + getRdsHostListProvider(mockHostListProviderService, "mysql://url/"); - List<HostSpec> hosts = auroraHostListProvider.queryForTopology(mockConnection); + List<HostSpec> hosts = rdsHostListProvider.queryForTopology(mockConnection); assertEquals(expectedMySQL, hosts); when(mockResultSet.next()).thenReturn(true, false); when(mockResultSet.getString(eq(1))).thenReturn("postgresql"); - auroraHostListProvider = - getAuroraHostListProvider(mockHostListProviderService, "postgresql://url/"); - hosts = auroraHostListProvider.queryForTopology(mockConnection); + rdsHostListProvider = + getRdsHostListProvider(mockHostListProviderService, "postgresql://url/"); + hosts = rdsHostListProvider.queryForTopology(mockConnection); assertEquals(expectedPostgres, hosts); } @Test void testQueryForTopology_queryResultsInException() throws SQLException { - auroraHostListProvider = - getAuroraHostListProvider(mockHostListProviderService, "protocol://url/"); + rdsHostListProvider = + getRdsHostListProvider(mockHostListProviderService, "protocol://url/"); when(mockStatement.executeQuery(queryCaptor.capture())).thenThrow(new SQLSyntaxErrorException()); assertThrows( SQLException.class, - () -> auroraHostListProvider.queryForTopology(mockConnection)); + () -> rdsHostListProvider.queryForTopology(mockConnection)); } @Test void testGetCachedTopology_returnCachedTopology() throws SQLException { - auroraHostListProvider = getAuroraHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url"); + rdsHostListProvider = getRdsHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url"); final List<HostSpec> expected = hosts; - AuroraHostListProvider.topologyCache.put(auroraHostListProvider.clusterId, expected, defaultRefreshRateNano); + RdsHostListProvider.topologyCache.put(rdsHostListProvider.clusterId, expected, defaultRefreshRateNano); - final List<HostSpec> result = auroraHostListProvider.getCachedTopology(); + final List<HostSpec> result = rdsHostListProvider.getCachedTopology(); assertEquals(expected, result); } @Test void testGetCachedTopology_returnNull() throws InterruptedException, SQLException {
-    auroraHostListProvider = getAuroraHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url");
+    rdsHostListProvider = getRdsHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url");
     // Test getCachedTopology with empty topology.
-    assertNull(auroraHostListProvider.getCachedTopology());
-    auroraHostListProvider.clear();
+    assertNull(rdsHostListProvider.getCachedTopology());
+    rdsHostListProvider.clear();
 
-    auroraHostListProvider = getAuroraHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url");
+    rdsHostListProvider = getRdsHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url");
     final long refreshRateOneNanosecond = 1;
-    AuroraHostListProvider.topologyCache.put(auroraHostListProvider.clusterId, hosts, refreshRateOneNanosecond);
+    RdsHostListProvider.topologyCache.put(rdsHostListProvider.clusterId, hosts, refreshRateOneNanosecond);
     TimeUnit.NANOSECONDS.sleep(1);
 
     // Test getCachedTopology with expired cache.
-    assertNull(auroraHostListProvider.getCachedTopology());
+    assertNull(rdsHostListProvider.getCachedTopology());
   }
 
   @Test
   void testTopologyCache_NoSuggestedClusterId() throws SQLException {
-    AuroraHostListProvider.clearAll();
+    RdsHostListProvider.clearAll();
 
-    AuroraHostListProvider provider1 = Mockito.spy(
-        getAuroraHostListProvider(mockHostListProviderService,
+    RdsHostListProvider provider1 = Mockito.spy(
+        getRdsHostListProvider(mockHostListProviderService,
             "jdbc:something://cluster-a.domain.com/"));
     provider1.init();
     final List<HostSpec> topologyClusterA = Arrays.asList(
@@ -268,13 +268,13 @@ void testTopologyCache_NoSuggestedClusterId() throws SQLException {
     doReturn(topologyClusterA)
         .when(provider1).queryForTopology(any(Connection.class));
 
-    assertEquals(0, AuroraHostListProvider.topologyCache.size());
+    assertEquals(0, RdsHostListProvider.topologyCache.size());
 
     final List<HostSpec> topologyProvider1 = provider1.refresh(Mockito.mock(Connection.class));
     assertEquals(topologyClusterA, topologyProvider1);
 
-    AuroraHostListProvider provider2 = Mockito.spy(
-        getAuroraHostListProvider(mockHostListProviderService,
+    RdsHostListProvider provider2 = Mockito.spy(
+        getRdsHostListProvider(mockHostListProviderService,
             "jdbc:something://cluster-b.domain.com/"));
     provider2.init();
     assertNull(provider2.getCachedTopology());
@@ -291,15 +291,15 @@ void testTopologyCache_NoSuggestedClusterId() throws SQLException {
     final List<HostSpec> topologyProvider2 = provider2.refresh(Mockito.mock(Connection.class));
     assertEquals(topologyClusterB, topologyProvider2);
 
-    assertEquals(2, AuroraHostListProvider.topologyCache.size());
+    assertEquals(2, RdsHostListProvider.topologyCache.size());
   }
 
   @Test
   void testTopologyCache_SuggestedClusterIdForRds() throws SQLException {
-    AuroraHostListProvider.clearAll();
+    RdsHostListProvider.clearAll();
 
-    AuroraHostListProvider provider1 = Mockito.spy(
-        getAuroraHostListProvider(mockHostListProviderService,
+    RdsHostListProvider provider1 = Mockito.spy(
+        getRdsHostListProvider(mockHostListProviderService,
            "jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/"));
     provider1.init();
     final List<HostSpec> topologyClusterA = Arrays.asList(
@@ -321,13 +321,13 @@ void testTopologyCache_SuggestedClusterIdForRds() throws SQLException {
     doReturn(topologyClusterA).when(provider1).queryForTopology(any(Connection.class));
 
-    assertEquals(0, AuroraHostListProvider.topologyCache.size());
+    assertEquals(0, RdsHostListProvider.topologyCache.size());
 
     final List<HostSpec> topologyProvider1 = provider1.refresh(Mockito.mock(Connection.class));
     assertEquals(topologyClusterA, topologyProvider1);
 
-    AuroraHostListProvider provider2 = Mockito.spy(
-        getAuroraHostListProvider(mockHostListProviderService,
+    RdsHostListProvider provider2 = Mockito.spy(
+        getRdsHostListProvider(mockHostListProviderService,
            "jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/"));
     provider2.init();
 
@@ -338,15 +338,15 @@ void testTopologyCache_SuggestedClusterIdForRds() throws SQLException {
     final List<HostSpec> topologyProvider2 = provider2.refresh(Mockito.mock(Connection.class));
     assertEquals(topologyClusterA, topologyProvider2);
 
-    assertEquals(1, AuroraHostListProvider.topologyCache.size());
+    assertEquals(1, RdsHostListProvider.topologyCache.size());
   }
 
   @Test
   void testTopologyCache_SuggestedClusterIdForInstance() throws SQLException {
-    AuroraHostListProvider.clearAll();
+    RdsHostListProvider.clearAll();
 
-    AuroraHostListProvider provider1 = Mockito.spy(
-        getAuroraHostListProvider(mockHostListProviderService,
+    RdsHostListProvider provider1 = Mockito.spy(
+        getRdsHostListProvider(mockHostListProviderService,
            "jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/"));
     provider1.init();
     final List<HostSpec> topologyClusterA = Arrays.asList(
@@ -368,13 +368,13 @@ void testTopologyCache_SuggestedClusterIdForInstance() throws SQLException {
     doReturn(topologyClusterA).when(provider1).queryForTopology(any(Connection.class));
 
-    assertEquals(0, AuroraHostListProvider.topologyCache.size());
+    assertEquals(0, RdsHostListProvider.topologyCache.size());
 
     final List<HostSpec> topologyProvider1 = provider1.refresh(Mockito.mock(Connection.class));
     assertEquals(topologyClusterA, topologyProvider1);
 
-    AuroraHostListProvider provider2 = Mockito.spy(
-        getAuroraHostListProvider(mockHostListProviderService,
+    RdsHostListProvider provider2 = Mockito.spy(
+        getRdsHostListProvider(mockHostListProviderService,
            "jdbc:something://instance-a-3.xyz.us-east-2.rds.amazonaws.com/"));
     provider2.init();
 
@@ -385,15 +385,15 @@ void testTopologyCache_SuggestedClusterIdForInstance() throws SQLException {
     final List<HostSpec> topologyProvider2 = provider2.refresh(Mockito.mock(Connection.class));
     assertEquals(topologyClusterA, topologyProvider2);
 
-    assertEquals(1, AuroraHostListProvider.topologyCache.size());
+    assertEquals(1, RdsHostListProvider.topologyCache.size());
   }
 
   @Test
   void testTopologyCache_AcceptSuggestion() throws SQLException {
-    AuroraHostListProvider.clearAll();
+    RdsHostListProvider.clearAll();
 
-    AuroraHostListProvider provider1 = Mockito.spy(
-        getAuroraHostListProvider(mockHostListProviderService,
+    RdsHostListProvider provider1 = Mockito.spy(
+        getRdsHostListProvider(mockHostListProviderService,
            "jdbc:something://instance-a-2.xyz.us-east-2.rds.amazonaws.com/"));
     provider1.init();
     final List<HostSpec> topologyClusterA = Arrays.asList(
@@ -415,15 +415,15 @@ void testTopologyCache_AcceptSuggestion() throws SQLException {
     doAnswer(a -> topologyClusterA).when(provider1).queryForTopology(any(Connection.class));
 
-    assertEquals(0, AuroraHostListProvider.topologyCache.size());
+    assertEquals(0, RdsHostListProvider.topologyCache.size());
 
     List<HostSpec> topologyProvider1 = provider1.refresh(Mockito.mock(Connection.class));
     assertEquals(topologyClusterA, topologyProvider1);
 
-    // AuroraHostListProvider.logCache();
+    // RdsHostListProvider.logCache();
 
-    AuroraHostListProvider provider2 = Mockito.spy(
-        getAuroraHostListProvider(mockHostListProviderService,
+    RdsHostListProvider provider2 = Mockito.spy(
+        getRdsHostListProvider(mockHostListProviderService,
"jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/")); provider2.init(); @@ -435,11 +435,11 @@ void testTopologyCache_AcceptSuggestion() throws SQLException { assertNotEquals(provider1.clusterId, provider2.clusterId); assertFalse(provider1.isPrimaryClusterId); assertTrue(provider2.isPrimaryClusterId); - assertEquals(2, AuroraHostListProvider.topologyCache.size()); + assertEquals(2, RdsHostListProvider.topologyCache.size()); assertEquals("cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/", - AuroraHostListProvider.suggestedPrimaryClusterIdCache.get(provider1.clusterId)); + RdsHostListProvider.suggestedPrimaryClusterIdCache.get(provider1.clusterId)); - // AuroraHostListProvider.logCache(); + // RdsHostListProvider.logCache(); topologyProvider1 = provider1.forceRefresh(Mockito.mock(Connection.class)); assertEquals(topologyClusterA, topologyProvider1); @@ -447,35 +447,35 @@ void testTopologyCache_AcceptSuggestion() throws SQLException { assertTrue(provider1.isPrimaryClusterId); assertTrue(provider2.isPrimaryClusterId); - // AuroraHostListProvider.logCache(); + // RdsHostListProvider.logCache(); } @Test void testIdentifyConnectionWithInvalidNodeIdQuery() throws SQLException { - auroraHostListProvider = Mockito.spy(getAuroraHostListProvider( + rdsHostListProvider = Mockito.spy(getRdsHostListProvider( mockHostListProviderService, "jdbc:someprotocol://url")); when(mockResultSet.next()).thenReturn(false); - assertThrows(SQLException.class, () -> auroraHostListProvider.identifyConnection(mockConnection)); + assertThrows(SQLException.class, () -> rdsHostListProvider.identifyConnection(mockConnection)); when(mockConnection.createStatement()).thenThrow(new SQLException("exception")); - assertThrows(SQLException.class, () -> auroraHostListProvider.identifyConnection(mockConnection)); + assertThrows(SQLException.class, () -> rdsHostListProvider.identifyConnection(mockConnection)); } @Test void testIdentifyConnectionNullTopology() throws SQLException { - auroraHostListProvider = Mockito.spy(getAuroraHostListProvider( + rdsHostListProvider = Mockito.spy(getRdsHostListProvider( mockHostListProviderService, "jdbc:someprotocol://url")); - auroraHostListProvider.clusterInstanceTemplate = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) + rdsHostListProvider.clusterInstanceTemplate = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) .host("?.pattern").build(); when(mockResultSet.next()).thenReturn(true); when(mockResultSet.getString(eq(1))).thenReturn("instance-1"); - when(auroraHostListProvider.refresh(eq(mockConnection))).thenReturn(null); + when(rdsHostListProvider.refresh(eq(mockConnection))).thenReturn(null); - assertNull(auroraHostListProvider.identifyConnection(mockConnection)); + assertNull(rdsHostListProvider.identifyConnection(mockConnection)); } @Test @@ -487,14 +487,14 @@ void testIdentifyConnectionHostNotInTopology() throws SQLException { .role(HostRole.WRITER) .build()); - auroraHostListProvider = Mockito.spy(getAuroraHostListProvider( + rdsHostListProvider = Mockito.spy(getRdsHostListProvider( mockHostListProviderService, "jdbc:someprotocol://url")); when(mockResultSet.next()).thenReturn(true); when(mockResultSet.getString(eq(1))).thenReturn("instance-1"); - when(auroraHostListProvider.refresh(eq(mockConnection))).thenReturn(cachedTopology); + when(rdsHostListProvider.refresh(eq(mockConnection))).thenReturn(cachedTopology); - assertNull(auroraHostListProvider.identifyConnection(mockConnection)); + 
+    assertNull(rdsHostListProvider.identifyConnection(mockConnection));
   }
 
   @Test
@@ -507,23 +507,23 @@ void testIdentifyConnectionHostInTopology() throws SQLException {
     expectedHost.setHostId("instance-a-1");
     final List<HostSpec> cachedTopology = Collections.singletonList(expectedHost);
 
-    auroraHostListProvider = Mockito.spy(getAuroraHostListProvider(
+    rdsHostListProvider = Mockito.spy(getRdsHostListProvider(
         mockHostListProviderService, "jdbc:someprotocol://url"));
     when(mockResultSet.next()).thenReturn(true);
     when(mockResultSet.getString(eq(1))).thenReturn("instance-a-1");
-    when(auroraHostListProvider.refresh()).thenReturn(cachedTopology);
+    when(rdsHostListProvider.refresh()).thenReturn(cachedTopology);
 
-    final HostSpec actual = auroraHostListProvider.identifyConnection(mockConnection);
+    final HostSpec actual = rdsHostListProvider.identifyConnection(mockConnection);
     assertEquals("instance-a-1.xyz.us-east-2.rds.amazonaws.com", actual.getHost());
     assertEquals("instance-a-1", actual.getHostId());
   }
 
   @Test
   void testGetTopology_StaleRecord() throws SQLException {
-    auroraHostListProvider = Mockito.spy(
-        getAuroraHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url"));
-    auroraHostListProvider.isInitialized = true;
+    rdsHostListProvider = Mockito.spy(
+        getRdsHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url"));
+    rdsHostListProvider.isInitialized = true;
 
     final String hostName1 = "hostName1";
     final String hostName2 = "hostName2";
@@ -547,17 +547,17 @@ void testGetTopology_StaleRecord() throws SQLException {
         .lastUpdateTime(secondTimestamp)
         .build();
 
-    final FetchTopologyResult result = auroraHostListProvider.getTopology(mockConnection, true);
-    verify(auroraHostListProvider, atMostOnce()).queryForTopology(mockConnection);
+    final FetchTopologyResult result = rdsHostListProvider.getTopology(mockConnection, true);
+    verify(rdsHostListProvider, atMostOnce()).queryForTopology(mockConnection);
     assertEquals(1, result.hosts.size());
     assertEquals(expectedWriter, result.hosts.get(0));
   }
 
   @Test
   void testGetTopology_InvalidLastUpdatedTimestamp() throws SQLException {
-    auroraHostListProvider = Mockito.spy(
-        getAuroraHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url"));
-    auroraHostListProvider.isInitialized = true;
+    rdsHostListProvider = Mockito.spy(
+        getRdsHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url"));
+    rdsHostListProvider.isInitialized = true;
 
     final String hostName = "hostName";
     final Float cpuUtilization = 11.1F;
@@ -569,8 +569,8 @@ void testGetTopology_InvalidLastUpdatedTimestamp() throws SQLException {
     when(mockResultSet.getFloat(4)).thenReturn(nodeLag);
     when(mockResultSet.getTimestamp(5)).thenThrow(WrongArgumentException.class);
 
-    final FetchTopologyResult result = auroraHostListProvider.getTopology(mockConnection, true);
-    verify(auroraHostListProvider, atMostOnce()).queryForTopology(mockConnection);
+    final FetchTopologyResult result = rdsHostListProvider.getTopology(mockConnection, true);
+    verify(rdsHostListProvider, atMostOnce()).queryForTopology(mockConnection);
     final String expectedLastUpdatedTimeStampRounded = Timestamp.from(Instant.now()).toString().substring(0, 16);
     assertEquals(1, result.hosts.size());
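
Editor's note: the hunks above are a mechanical rename of `AuroraHostListProvider` to `RdsHostListProvider` in the topology tests; the assertions themselves are unchanged. For readers new to these tests, the sketch below illustrates the cache-or-query pattern that `getTopology(connection, forceUpdate)` exercises. `FetchTopologyResult`, `topologyCache`, `queryForTopology`, and `getCachedTopology` appear in the tests themselves; the control flow, the `FetchTopologyResult` constructor shape, and the `initialHostList`/`refreshRateNano` fields are assumptions for illustration, not the wrapper's actual implementation.

```java
// Hypothetical sketch of the cache-or-query flow the tests above exercise.
FetchTopologyResult getTopology(final Connection conn, final boolean forceUpdate) throws SQLException {
  final List<HostSpec> cached = getCachedTopology(); // null when absent or expired
  if (cached != null && !forceUpdate) {
    return new FetchTopologyResult(true, cached); // served from topologyCache
  }
  final List<HostSpec> hosts = queryForTopology(conn); // hits the database at most once per call
  if (hosts != null && !hosts.isEmpty()) {
    topologyCache.put(clusterId, hosts, refreshRateNano);
    return new FetchTopologyResult(false, hosts);
  }
  // An empty query result falls back to the host list parsed from the JDBC URL,
  // matching testGetTopology_withForceUpdate_returnsInitialHostList above.
  return new FetchTopologyResult(false, initialHostList);
}
```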
diff --git a/wrapper/src/test/java/software/amazon/jdbc/plugin/RoundRobinHostSelectorTest.java b/wrapper/src/test/java/software/amazon/jdbc/plugin/RoundRobinHostSelectorTest.java
new file mode 100644
index 000000000..b0b65a075
--- /dev/null
+++ b/wrapper/src/test/java/software/amazon/jdbc/plugin/RoundRobinHostSelectorTest.java
@@ -0,0 +1,357 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.plugin;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Properties;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import software.amazon.jdbc.HostRole;
+import software.amazon.jdbc.HostSpec;
+import software.amazon.jdbc.HostSpecBuilder;
+import software.amazon.jdbc.RoundRobinHostSelector;
+import software.amazon.jdbc.hostavailability.SimpleHostAvailabilityStrategy;
+
+public class RoundRobinHostSelectorTest {
+  private static final int TEST_PORT = 5432;
+  private static Properties defaultProps;
+  private static Properties weightedProps;
+
+  private final HostSpec writerHostSpec = new HostSpecBuilder(new SimpleHostAvailabilityStrategy())
+      .host("instance-0").port(TEST_PORT).build();
+  private final HostSpec readerHostSpec1 = new HostSpecBuilder(new SimpleHostAvailabilityStrategy())
+      .host("instance-1").port(TEST_PORT).role(HostRole.READER).build();
+  private final HostSpec readerHostSpec2 = new HostSpecBuilder(new SimpleHostAvailabilityStrategy())
+      .host("instance-2").port(TEST_PORT).role(HostRole.READER).build();
+  private final HostSpec readerHostSpec3 = new HostSpecBuilder(new SimpleHostAvailabilityStrategy())
+      .host("instance-3").port(TEST_PORT).role(HostRole.READER).build();
+  private final HostSpec readerHostSpec4 = new HostSpecBuilder(new SimpleHostAvailabilityStrategy())
+      .host("instance-4").port(TEST_PORT).role(HostRole.READER).build();
+
+  // Each number at the end of a host list's name indicates which readers it contains.
+  private final List<HostSpec> hostsList123 = Arrays.asList(
+      writerHostSpec,
+      readerHostSpec2,
+      readerHostSpec3,
+      readerHostSpec1);
+  private final List<HostSpec> hostsList1234 = Arrays.asList(
+      writerHostSpec,
+      readerHostSpec4,
+      readerHostSpec2,
+      readerHostSpec3,
+      readerHostSpec1);
+  private final List<HostSpec> hostsList13 = Arrays.asList(
+      writerHostSpec,
+      readerHostSpec3,
+      readerHostSpec1);
+  private final List<HostSpec> hostsList14 = Arrays.asList(
+      writerHostSpec,
+      readerHostSpec4,
+      readerHostSpec1);
+  private final List<HostSpec> hostsList23 = Arrays.asList(
+      writerHostSpec,
+      readerHostSpec3,
+      readerHostSpec2);
+  private final List<HostSpec> writerHostsList = Collections.singletonList(writerHostSpec);
+  private static RoundRobinHostSelector roundRobinHostSelector;
+
+  @BeforeEach
+  public void setUp() {
+    roundRobinHostSelector = new RoundRobinHostSelector();
+    defaultProps = new Properties();
+    weightedProps = new Properties();
+    final String hostWeights =
+        "instance-0:1,"
+            + "instance-1:3,"
+            + "instance-2:2,"
+            + "instance-3:1";
+    weightedProps.put(RoundRobinHostSelector.ROUND_ROBIN_HOST_WEIGHT_PAIRS.name, hostWeights);
+  }
+
+  @AfterEach
+  public void cleanUp() {
+    roundRobinHostSelector.clearCache();
+  }
+
+  @Test
+  public void testSetup_EmptyHost() {
+    final String hostWeights =
+        "instance-0:1,"
+            + ":3,"
+            + "instance-2:2,"
+            + "instance-3:3";
+    defaultProps.put(RoundRobinHostSelector.ROUND_ROBIN_HOST_WEIGHT_PAIRS.name, hostWeights);
+    assertThrows(
+        SQLException.class,
+        () -> roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost());
+  }
+
+  @Test
+  public void testSetup_EmptyWeight() {
+    final String hostWeights =
+        "instance-0:1,"
+            + "instance-1:,"
+            + "instance-2:2,"
+            + "instance-3:3";
+    defaultProps.put(RoundRobinHostSelector.ROUND_ROBIN_HOST_WEIGHT_PAIRS.name, hostWeights);
+    assertThrows(
+        SQLException.class,
+        () -> roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost());
+  }
+
+  @Test
+  public void testSetup_ZeroWeight() {
+    final String hostWeights =
+        "instance-0:1,"
+            + "instance-1:0,"
+            + "instance-2:2,"
+            + "instance-3:3";
+    defaultProps.put(RoundRobinHostSelector.ROUND_ROBIN_HOST_WEIGHT_PAIRS.name, hostWeights);
+    assertThrows(
+        SQLException.class,
+        () -> roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost());
+  }
+
+  @Test
+  public void testSetup_ZeroDefaultWeight() {
+    defaultProps.put(RoundRobinHostSelector.ROUND_ROBIN_DEFAULT_WEIGHT.name, "0");
+    assertThrows(
+        SQLException.class,
+        () -> roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost());
+  }
+
+  @Test
+  public void testSetup_BadWeightFormat() {
+    final String hostWeights =
+        "instance-0:1,"
+            + "instance-1:1:3,"
+            + "instance-2:2,"
+            + "instance-3:3";
+    defaultProps.put(RoundRobinHostSelector.ROUND_ROBIN_HOST_WEIGHT_PAIRS.name, hostWeights);
+    assertThrows(
+        SQLException.class,
+        () -> roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost());
+  }
+
+  @Test
+  public void testSetup_FloatWeights() {
+    final String hostWeights =
+        "instance-0:1,"
+            + "instance-1:1.123,"
+            + "instance-2:2.456,"
+            + "instance-3:3.789";
+    defaultProps.put(RoundRobinHostSelector.ROUND_ROBIN_HOST_WEIGHT_PAIRS.name, hostWeights);
+    assertThrows(
+        SQLException.class,
+        () -> roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost());
+  }
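
Editor's note: taken together, the `testSetup_*` cases pin down the accepted format of `ROUND_ROBIN_HOST_WEIGHT_PAIRS`: comma-separated `<host>:<weight>` pairs where each weight is an integer of at least 1. Empty hosts, empty weights, zero, negative, float, and non-numeric weights must all raise a `SQLException`. Below is a minimal validation sketch consistent with those cases; the selector's real parsing code may differ.

```java
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;

final class WeightPairs {
  // Parses "instance-0:1,instance-1:3" into {instance-0=1, instance-1=3}.
  static Map<String, Integer> parse(final String pairs) throws SQLException {
    final Map<String, Integer> weights = new HashMap<>();
    for (final String pair : pairs.split(",")) {
      final String[] parts = pair.split(":");
      if (parts.length != 2 || parts[0].isEmpty() || parts[1].isEmpty()) {
        throw new SQLException("Invalid host weight pair: " + pair); // rejects ":3", "a:", "a:1:3"
      }
      final int weight;
      try {
        weight = Integer.parseInt(parts[1]); // rejects floats such as "1.123" and junk such as "1a"
      } catch (final NumberFormatException e) {
        throw new SQLException("Invalid host weight pair: " + pair, e);
      }
      if (weight < 1) { // rejects zero and negative weights
        throw new SQLException("Invalid host weight pair: " + pair);
      }
      weights.put(parts[0], weight);
    }
    return weights;
  }
}
```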
"1.123"); + assertThrows( + SQLException.class, + () -> roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost()); + } + + @Test + public void testSetup_NegativeWeights() { + final String hostWeights = + "instance-0:1," + + "instance-1:-1," + + "instance-2:-2," + + "instance-3:-3"; + defaultProps.put(RoundRobinHostSelector.ROUND_ROBIN_HOST_WEIGHT_PAIRS.name, hostWeights); + assertThrows( + SQLException.class, + () -> roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost()); + } + + @Test + public void testSetup_NegativeDefaultWeight() { + defaultProps.put(RoundRobinHostSelector.ROUND_ROBIN_DEFAULT_WEIGHT.name, "-1"); + assertThrows( + SQLException.class, + () -> roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost()); + } + + @Test + public void testSetup_ParseWeightError() { + final String hostWeights = "instance-0:1,instance-1:1a"; + defaultProps.put(RoundRobinHostSelector.ROUND_ROBIN_HOST_WEIGHT_PAIRS.name, hostWeights); + assertThrows( + SQLException.class, + () -> roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost()); + } + + @Test + public void testSetup_ParseDefaultWeightError() { + defaultProps.put(RoundRobinHostSelector.ROUND_ROBIN_DEFAULT_WEIGHT.name, "1a"); + assertThrows( + SQLException.class, + () -> roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost()); + } + + @Test + public void testGetHost_NoReaders() { + assertThrows(SQLException.class, + () -> roundRobinHostSelector.getHost(writerHostsList, HostRole.READER, defaultProps)); + } + + @Test + public void testGetHost() throws SQLException { + assertEquals( + readerHostSpec1.getHost(), + roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost()); + assertEquals( + readerHostSpec2.getHost(), + roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost()); + assertEquals( + readerHostSpec3.getHost(), + roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost()); + assertEquals( + readerHostSpec1.getHost(), + roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost()); + } + + @Test + public void testGetHostNullProperties() throws SQLException { + defaultProps = null; + assertEquals( + readerHostSpec1.getHost(), + roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost()); + assertEquals( + readerHostSpec2.getHost(), + roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost()); + assertEquals( + readerHostSpec3.getHost(), + roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost()); + assertEquals( + readerHostSpec1.getHost(), + roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost()); + } + + @Test + public void testGetHost_Weighted() throws SQLException { + assertEquals( + readerHostSpec1.getHost(), + roundRobinHostSelector.getHost(hostsList123, HostRole.READER, weightedProps).getHost()); + assertEquals( + readerHostSpec1.getHost(), + roundRobinHostSelector.getHost(hostsList123, HostRole.READER, weightedProps).getHost()); + assertEquals( + readerHostSpec1.getHost(), + roundRobinHostSelector.getHost(hostsList123, HostRole.READER, weightedProps).getHost()); + assertEquals( + readerHostSpec2.getHost(), + roundRobinHostSelector.getHost(hostsList123, HostRole.READER, weightedProps).getHost()); + assertEquals( + readerHostSpec2.getHost(), + 
+    assertEquals(
+        readerHostSpec3.getHost(),
+        roundRobinHostSelector.getHost(hostsList123, HostRole.READER, weightedProps).getHost());
+    assertEquals(
+        readerHostSpec1.getHost(),
+        roundRobinHostSelector.getHost(hostsList123, HostRole.READER, weightedProps).getHost());
+  }
+
+  @Test
+  public void testGetHost_CacheEntryExpired() throws SQLException {
+    assertEquals(
+        readerHostSpec1.getHost(),
+        roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost());
+    assertEquals(
+        readerHostSpec2.getHost(),
+        roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost());
+
+    roundRobinHostSelector.clearCache();
+
+    assertEquals(
+        readerHostSpec1.getHost(),
+        roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost());
+    assertEquals(
+        readerHostSpec2.getHost(),
+        roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost());
+  }
+
+  @Test
+  public void testGetHost_ScaleUp() throws SQLException {
+    assertEquals(
+        readerHostSpec1.getHost(),
+        roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost());
+    assertEquals(
+        readerHostSpec2.getHost(),
+        roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost());
+    assertEquals(
+        readerHostSpec3.getHost(),
+        roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost());
+    assertEquals(
+        readerHostSpec4.getHost(),
+        roundRobinHostSelector.getHost(hostsList1234, HostRole.READER, defaultProps).getHost());
+  }
+
+  @Test
+  public void testGetHost_ScaleDown() throws SQLException {
+    assertEquals(
+        readerHostSpec1.getHost(),
+        roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost());
+    assertEquals(
+        readerHostSpec3.getHost(),
+        roundRobinHostSelector.getHost(hostsList13, HostRole.READER, defaultProps).getHost());
+    assertEquals(
+        readerHostSpec1.getHost(),
+        roundRobinHostSelector.getHost(hostsList13, HostRole.READER, defaultProps).getHost());
+  }
+
+  @Test
+  public void testGetHost_LastHostNotInHostsList() throws SQLException {
+    assertEquals(
+        readerHostSpec1.getHost(),
+        roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost());
+    assertEquals(
+        readerHostSpec2.getHost(),
+        roundRobinHostSelector.getHost(hostsList123, HostRole.READER, defaultProps).getHost());
+    assertEquals(
+        readerHostSpec1.getHost(),
+        roundRobinHostSelector.getHost(hostsList13, HostRole.READER, defaultProps).getHost());
+    assertEquals(
+        readerHostSpec3.getHost(),
+        roundRobinHostSelector.getHost(hostsList13, HostRole.READER, defaultProps).getHost());
+  }
+
+  @Test
+  public void testGetHost_AllHostsChanged() throws SQLException {
+    assertEquals(
+        readerHostSpec1.getHost(),
+        roundRobinHostSelector.getHost(hostsList14, HostRole.READER, defaultProps).getHost());
+    assertEquals(
+        readerHostSpec2.getHost(),
+        roundRobinHostSelector.getHost(hostsList23, HostRole.READER, defaultProps).getHost());
+    assertEquals(
+        readerHostSpec4.getHost(),
+        roundRobinHostSelector.getHost(hostsList14, HostRole.READER, defaultProps).getHost());
+  }
+}
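
Editor's note: the selection tests above read as a specification: unweighted round-robin cycles readers in host order, weights repeat a reader weight-many times per cycle, and the cycle position is kept in a cache that survives topology changes until cleared. The sketch below shows how an application might opt in. The `ROUND_ROBIN_HOST_WEIGHT_PAIRS` and `ROUND_ROBIN_DEFAULT_WEIGHT` names come from the tests; `wrapperPlugins` and `readerHostSelectorStrategy` are taken from the wrapper's documentation and should be verified against the release in use.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;
import software.amazon.jdbc.RoundRobinHostSelector;

public final class RoundRobinExample {
  public static void main(final String[] args) throws Exception {
    final Properties props = new Properties();
    props.setProperty("wrapperPlugins", "readWriteSplitting");
    props.setProperty("readerHostSelectorStrategy", "roundRobin"); // pick readers with RoundRobinHostSelector
    props.setProperty(RoundRobinHostSelector.ROUND_ROBIN_HOST_WEIGHT_PAIRS.name,
        "instance-1:3,instance-2:1"); // instance-1 is picked three times per cycle
    props.setProperty(RoundRobinHostSelector.ROUND_ROBIN_DEFAULT_WEIGHT.name, "1"); // weight for unlisted hosts

    try (Connection conn = DriverManager.getConnection(
        "jdbc:aws-wrapper:postgresql://database.cluster-xyz.us-east-2.rds.amazonaws.com/postgres", props)) {
      conn.setReadOnly(true); // subsequent reader selection follows the configured weights
    }
  }
}
```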
diff --git a/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/ConcurrencyTests.java b/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/ConcurrencyTests.java
index 5deb3fca8..e73a5a6bd 100644
--- a/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/ConcurrencyTests.java
+++ b/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/ConcurrencyTests.java
@@ -57,6 +57,7 @@
 import org.junit.jupiter.api.Disabled;
 import org.junit.jupiter.api.Test;
 import software.amazon.jdbc.ConnectionPlugin;
+import software.amazon.jdbc.ConnectionProvider;
 import software.amazon.jdbc.HostListProvider;
 import software.amazon.jdbc.HostRole;
 import software.amazon.jdbc.HostSpec;
@@ -313,7 +314,6 @@ public HostRole getHostRole(Connection conn) {
 
     @Override
     public void setAvailability(Set<String> hostAliases, HostAvailability availability) {
-
     }
 
     @Override
@@ -404,6 +404,21 @@ public void fillAliases(Connection connection, HostSpec hostSpec) throws SQLExce
     public HostSpecBuilder getHostSpecBuilder() {
       return new HostSpecBuilder(new SimpleHostAvailabilityStrategy());
     }
+
+    @Override
+    public ConnectionProvider getConnectionProvider() {
+      return null;
+    }
+
+    @Override
+    public String getDriverProtocol() {
+      return null;
+    }
+
+    @Override
+    public Properties getProperties() {
+      return null;
+    }
   }
 
   public static class TestConnection implements Connection {
diff --git a/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/MultiThreadedMonitorThreadContainerTest.java b/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/MultiThreadedMonitorThreadContainerTest.java
new file mode 100644
index 000000000..dcc13fa05
--- /dev/null
+++ b/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/MultiThreadedMonitorThreadContainerTest.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.plugin.efm;
+
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.RepeatedTest;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+
+public class MultiThreadedMonitorThreadContainerTest {
+
+  @Mock ExecutorServiceInitializer mockExecutorServiceInitializer;
+  @Mock ExecutorService mockExecutorService;
+
+  private AutoCloseable closeable;
+
+  @BeforeEach
+  void init() {
+    closeable = MockitoAnnotations.openMocks(this);
+    when(mockExecutorServiceInitializer.createExecutorService()).thenReturn(mockExecutorService);
+  }
+
+  @AfterEach
+  void cleanup() throws Exception {
+    closeable.close();
+    MonitorThreadContainer.releaseInstance();
+  }
+
+  @RepeatedTest(value = 1000, name = "MonitorThreadContainer ThreadPoolExecutor is not closed prematurely")
+  void testThreadPoolExecutorNotClosedPrematurely() throws InterruptedException {
+    MonitorThreadContainer.getInstance(mockExecutorServiceInitializer);
+
+    ExecutorService executorService = Executors.newCachedThreadPool();
+    executorService.execute(() -> MonitorThreadContainer.getInstance(mockExecutorServiceInitializer));
+    Thread.sleep(3);
+    executorService.execute(MonitorThreadContainer::releaseInstance);
+    executorService.shutdown();
+
+    verify(mockExecutorService, times(0)).shutdownNow();
+  }
+}
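
Editor's note: this repeated test targets the race between `MonitorThreadContainer#getInstance()` and `MonitorThreadContainer#releaseInstance()`: a release on one thread must not shut down the shared executor while another thread has just obtained the container. A reference-counted singleton is one way to get that guarantee; the sketch below is illustrative only, and the container's actual locking scheme may differ.

```java
// Hypothetical reference-counted singleton; not the wrapper's implementation.
final class Container {
  private static Container instance;
  private static int refCount;

  static synchronized Container getInstance() {
    if (instance == null) {
      instance = new Container();
    }
    refCount++;
    return instance;
  }

  static synchronized void releaseInstance() {
    if (instance == null) {
      return;
    }
    if (--refCount <= 0) { // only the last release tears down shared resources
      instance.shutdown();
      instance = null;
      refCount = 0;
    }
  }

  private void shutdown() {
    // stop the shared thread pool here, e.g. threadPool.shutdownNow()
  }
}
```

With this discipline, the test's sequence of two `getInstance` calls and one `releaseInstance` never reaches `shutdownNow()`, which is exactly what `verify(mockExecutorService, times(0)).shutdownNow()` asserts.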
diff --git a/wrapper/src/test/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPluginTest.java b/wrapper/src/test/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPluginTest.java
index 63498a3ba..40aa20c38 100644
--- a/wrapper/src/test/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPluginTest.java
+++ b/wrapper/src/test/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPluginTest.java
@@ -23,11 +23,14 @@
 import static org.mockito.AdditionalMatchers.not;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
+import com.zaxxer.hikari.HikariConfig;
 import java.sql.Connection;
 import java.sql.ResultSet;
 import java.sql.SQLException;
@@ -42,6 +45,7 @@
 import org.junit.jupiter.api.Test;
 import org.mockito.Mock;
 import org.mockito.MockitoAnnotations;
+import software.amazon.jdbc.HikariPooledConnectionProvider;
 import software.amazon.jdbc.HostListProviderService;
 import software.amazon.jdbc.HostRole;
 import software.amazon.jdbc.HostSpec;
@@ -50,6 +54,7 @@
 import software.amazon.jdbc.NodeChangeOptions;
 import software.amazon.jdbc.OldConnectionSuggestedAction;
 import software.amazon.jdbc.PluginService;
+import software.amazon.jdbc.PropertyDefinition;
 import software.amazon.jdbc.dialect.Dialect;
 import software.amazon.jdbc.hostavailability.SimpleHostAvailabilityStrategy;
 import software.amazon.jdbc.plugin.failover.FailoverSuccessSQLException;
@@ -527,22 +532,99 @@ public void testExecuteClearWarningsOnClosedConnectionsIsNotCalled() throws SQLE
 
   @Test
   public void testExecuteClearWarningsOnNullConnectionsIsNotCalled() throws SQLException {
     final ReadWriteSplittingPlugin plugin = new ReadWriteSplittingPlugin(
-      mockPluginService,
-      defaultProps,
-      mockHostListProviderService,
-      null,
-      null);
+        mockPluginService,
+        defaultProps,
+        mockHostListProviderService,
+        null,
+        null);
 
     // calling clearWarnings() on nullified connection would throw an exception
     assertDoesNotThrow(() -> {
       plugin.execute(
-        ResultSet.class,
-        SQLException.class,
-        mockStatement,
-        "Connection.clearWarnings",
-        mockSqlFunction,
-        new Object[] {}
+          ResultSet.class,
+          SQLException.class,
+          mockStatement,
+          "Connection.clearWarnings",
+          mockSqlFunction,
+          new Object[] {}
       );
     });
   }
+
+  @Test
+  public void testClosePooledReaderConnectionAfterSetReadOnly() throws SQLException {
+    doReturn(writerHostSpec)
+        .doReturn(writerHostSpec)
+        .doReturn(readerHostSpec1)
+        .when(this.mockPluginService).getCurrentHostSpec();
+    doReturn(mockReaderConn1).when(mockPluginService).connect(readerHostSpec1, null);
+    when(mockPluginService.getDriverProtocol()).thenReturn("jdbc:postgresql://");
+
+    final HikariPooledConnectionProvider connProvider =
+        new HikariPooledConnectionProvider(
+            ReadWriteSplittingPluginTest::getHikariConfig,
+            ReadWriteSplittingPluginTest::getPoolKey
+        );
+    when(mockPluginService.getConnectionProvider()).thenReturn(connProvider);
+
+    final ReadWriteSplittingPlugin plugin = new ReadWriteSplittingPlugin(
+        mockPluginService,
+        defaultProps,
+        mockHostListProviderService,
+        mockWriterConn,
+        null);
+    final ReadWriteSplittingPlugin spyPlugin = spy(plugin);
+
+    spyPlugin.switchConnectionIfRequired(true);
+    spyPlugin.switchConnectionIfRequired(false);
+
+    verify(spyPlugin, times(1)).closeConnectionIfIdle(eq(mockReaderConn1));
+  }
+
+  @Test
+  public void testClosePooledWriterConnectionAfterSetReadOnly() throws SQLException {
+    doReturn(writerHostSpec)
+        .doReturn(writerHostSpec)
+        .doReturn(readerHostSpec1)
+        .doReturn(readerHostSpec1)
+        .doReturn(writerHostSpec)
+        .when(this.mockPluginService).getCurrentHostSpec();
+    doReturn(mockWriterConn).when(mockPluginService).connect(writerHostSpec, null);
+    when(mockPluginService.getDriverProtocol()).thenReturn("jdbc:postgresql://");
+
+    final HikariPooledConnectionProvider connProvider =
+        new HikariPooledConnectionProvider(
+            ReadWriteSplittingPluginTest::getHikariConfig,
+            ReadWriteSplittingPluginTest::getPoolKey
+        );
+    when(mockPluginService.getConnectionProvider()).thenReturn(connProvider);
+
+    final ReadWriteSplittingPlugin plugin = new ReadWriteSplittingPlugin(
+        mockPluginService,
+        defaultProps,
+        mockHostListProviderService,
+        null,
+        null);
+    final ReadWriteSplittingPlugin spyPlugin = spy(plugin);
+
+    spyPlugin.switchConnectionIfRequired(true);
+    spyPlugin.switchConnectionIfRequired(false);
+    spyPlugin.switchConnectionIfRequired(true);
+
+    verify(spyPlugin, times(1)).closeConnectionIfIdle(eq(mockWriterConn));
+  }
+
+  private static HikariConfig getHikariConfig(HostSpec hostSpec, Properties props) {
+    final HikariConfig config = new HikariConfig();
+    config.setMaximumPoolSize(3);
+    config.setInitializationFailTimeout(75000);
+    config.setConnectionTimeout(10000);
+    return config;
+  }
+
+  private static String getPoolKey(HostSpec hostSpec, Properties props) {
+    final String user = props.getProperty(PropertyDefinition.USER.name);
+    final String somePropertyValue = props.getProperty("somePropertyValue");
+    return hostSpec.getUrl() + user + somePropertyValue;
+  }
 }
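
Editor's note: the two pooled-connection tests assert that switching roles closes the now-idle pooled connection exactly once (`closeConnectionIfIdle`), and `getPoolKey` shows that `HikariPooledConnectionProvider` keeps one internal pool per distinct key, here URL plus user plus an extra property. For reference, wiring such a provider into an application might look like the sketch below; `Driver.setCustomConnectionProvider` is the wrapper's documented registration hook, but verify the exact entry point against the release in use.

```java
import com.zaxxer.hikari.HikariConfig;
import software.amazon.jdbc.Driver;
import software.amazon.jdbc.HikariPooledConnectionProvider;
import software.amazon.jdbc.PropertyDefinition;

public final class PooledProviderSetup {
  public static void register() {
    final HikariPooledConnectionProvider connProvider = new HikariPooledConnectionProvider(
        (hostSpec, props) -> { // pool configuration created per host
          final HikariConfig config = new HikariConfig();
          config.setMaximumPoolSize(3);
          return config;
        },
        (hostSpec, props) -> // one internal pool is kept per distinct key
            hostSpec.getUrl() + props.getProperty(PropertyDefinition.USER.name));
    Driver.setCustomConnectionProvider(connProvider);
  }
}
```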
--- a/wrapper/src/test/resources/hibernate_files/hibernate-core.gradle
+++ b/wrapper/src/test/resources/hibernate_files/hibernate-core.gradle
@@ -61,7 +61,7 @@ dependencies {
         transitive = true
     }
     testImplementation "joda-time:joda-time:2.3"
-    testImplementation files('/app/libs/aws-advanced-jdbc-wrapper-2.2.3.jar')
+    testImplementation files('/app/libs/aws-advanced-jdbc-wrapper-2.2.4.jar')
     testImplementation dbLibs.postgresql
     testImplementation dbLibs.mysql
     testImplementation dbLibs.h2
diff --git a/wrapper/src/test/resources/hibernate_files/java-module.gradle b/wrapper/src/test/resources/hibernate_files/java-module.gradle
index b1812f48d..fd39c3434 100644
--- a/wrapper/src/test/resources/hibernate_files/java-module.gradle
+++ b/wrapper/src/test/resources/hibernate_files/java-module.gradle
@@ -97,7 +97,7 @@ dependencies {
     // Since both the DB2 driver and HANA have a package "net.jpountz" we have to add dependencies conditionally
     // This is due to the "no split-packages" requirement of Java 9+
-    testRuntimeOnly files('/app/libs/aws-advanced-jdbc-wrapper-2.2.3.jar')
+    testRuntimeOnly files('/app/libs/aws-advanced-jdbc-wrapper-2.2.4.jar')
     testRuntimeOnly dbLibs.mysql
 
     if ( db.startsWith( 'db2' ) ) {