From 43bbe1ac708730827290b1252f4c52dfa074d4df Mon Sep 17 00:00:00 2001 From: Lucas Kent Date: Thu, 28 Nov 2024 12:09:29 +1100 Subject: [PATCH 1/2] Redis to valkey (examples) --- .../config/docker-compose.yaml | 10 +- docs/src/SUMMARY.md | 6 +- docs/src/examples/redis-clustering-aware.md | 56 ++++---- docs/src/examples/redis-clustering-unaware.md | 131 +++--------------- docs/src/user-guide/getting-started.md | 2 +- docs/src/user-guide/introduction.md | 15 +- .../valkey-cache/docker-compose.yaml | 8 +- .../log-to-file/docker-compose.yaml | 7 +- .../tests/transforms/log_to_file.rs | 10 +- .../valkey_int_tests/basic_driver_tests.rs | 11 +- shotover-proxy/tests/valkey_int_tests/mod.rs | 2 +- shotover/src/frame/mod.rs | 2 +- shotover/src/lib.rs | 2 +- 13 files changed, 76 insertions(+), 186 deletions(-) diff --git a/custom-transforms-example/config/docker-compose.yaml b/custom-transforms-example/config/docker-compose.yaml index 24483c51f..f8891e78b 100644 --- a/custom-transforms-example/config/docker-compose.yaml +++ b/custom-transforms-example/config/docker-compose.yaml @@ -1,8 +1,8 @@ services: - redis-one: - image: library/redis:5.0.9 + valkey-one: + image: bitnami/valkey:7.2.5-debian-12-r9 ports: - "1111:6379" - volumes: - - ./redis.conf:/usr/local/etc/redis/redis.conf - command: [ "redis-server", "/usr/local/etc/redis/redis.conf" ] + environment: + ALLOW_EMPTY_PASSWORD: "yes" + VALKEY_TLS_ENABLED: "no" diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md index 2e704bfab..a6cb0c44c 100644 --- a/docs/src/SUMMARY.md +++ b/docs/src/SUMMARY.md @@ -11,9 +11,9 @@ - [Sources](./sources.md) - [Transforms](./transforms.md) - [Examples]() - - [Redis Cluster]() - - [Unaware client](./examples/redis-clustering-unaware.md) - - [Aware client](./examples/redis-clustering-aware.md) + - [Valkey Cluster]() + - [Unaware client](./examples/valkey-clustering-unaware.md) + - [Aware client](./examples/valkey-clustering-aware.md) - [Cassandra Cluster]() - [Shotover sidecars](./examples/cassandra-cluster-shotover-sidecar.md) - [Contributing](./dev-docs/contributing.md) diff --git a/docs/src/examples/redis-clustering-aware.md b/docs/src/examples/redis-clustering-aware.md index c78e31039..60547be85 100644 --- a/docs/src/examples/redis-clustering-aware.md +++ b/docs/src/examples/redis-clustering-aware.md @@ -1,10 +1,10 @@ -# Redis Clustering with cluster aware client +# Valkey Clustering with cluster aware client -The following guide shows you how to configure Shotover to support proxying Redis cluster *aware* clients to [Redis cluster](https://redis.io/topics/cluster-spec). +The following guide shows you how to configure Shotover to support proxying Valkey cluster *aware* clients to [Valkey cluster](https://valkey.io/topics/cluster-spec). ## Overview -In this example, we will be connecting to a Redis cluster that has the following topology: +In this example, we will be connecting to a Valkey cluster that has the following topology: * `172.16.1.2:6379` * `172.16.1.3:6379` @@ -13,31 +13,31 @@ In this example, we will be connecting to a Redis cluster that has the following * `172.16.1.6:6379` * `172.16.1.7:6379` -Shotover will be deployed as a sidecar to each node in the Redis cluster, listening on `6380`. Use the following [docker-compose.yaml](https://github.com/shotover/shotover-examples/blob/main/redis-cluster-1-1/docker-compose.yaml) to run the Redis cluster and Shotover sidecars. +Shotover will be deployed as a sidecar to each node in the Valkey cluster, listening on `6380`. 
Use the following [docker-compose.yaml](https://github.com/shotover/shotover-examples/blob/main/valkey-cluster-1-1/docker-compose.yaml) to run the Valkey cluster and Shotover sidecars. ```console -curl -L https://raw.githubusercontent.com/shotover/shotover-examples/main/redis-cluster-1-1/docker-compose.yaml --output docker-compose.yaml +curl -L https://raw.githubusercontent.com/shotover/shotover-examples/main/valkey-cluster-1-1/docker-compose.yaml --output docker-compose.yaml ``` -Below we can see an example of a Redis node and it's Shotover sidecar. Notice they are running on the same network address (`172.16.1.2`) and the present directory is being mounted to allow Shotover to access the config and topology files. +Below we can see an example of a Valkey node and it's Shotover sidecar. Notice they are running on the same network address (`172.16.1.2`) and the present directory is being mounted to allow Shotover to access the config and topology files. ```YAML -redis-node-0: - image: bitnami/redis-cluster:6.2.12-debian-11-r26 +valkey-node-0: + image: bitnami/valkey-cluster:7.2.5-debian-12-r4 networks: cluster_subnet: ipv4_address: 172.16.1.2 environment: - 'ALLOW_EMPTY_PASSWORD=yes' - - 'REDIS_NODES=redis-node-0 redis-node-1 redis-node-2' + - 'VALKEY_NODES=valkey-node-0 valkey-node-1 valkey-node-2' shotover-0: restart: always depends_on: - - redis-node-0 + - valkey-node-0 image: shotover/shotover-proxy - network_mode: "service:redis-node-0" + network_mode: "service:valkey-node-0" volumes: - type: bind source: $PWD @@ -45,37 +45,37 @@ shotover-0: ``` -In this example we will use `redis-benchmark` with cluster mode enabled as our Redis cluster aware client application. +In this example we will use `valkey-benchmark` with cluster mode enabled as our Valkey cluster aware client application. ## Configuration -First we will modify our `topology.yaml` file to have a single Redis source. This will: +First we will modify our `topology.yaml` file to have a single Valkey source. This will: -* Define how Shotover listens for incoming connections from our client application (`redis-benchmark`). -* Configure Shotover to connect to the Redis node via our defined remote address. -* Configure Shotover to rewrite all Redis ports with our Shotover port when the cluster aware driver is talking to the cluster, through Shotover. -* Connect our Redis Source to our Redis cluster sink (transform). +* Define how Shotover listens for incoming connections from our client application (`valkey-benchmark`). +* Configure Shotover to connect to the Valkey node via our defined remote address. +* Configure Shotover to rewrite all Valkey ports with our Shotover port when the cluster aware driver is talking to the cluster, through Shotover. +* Connect our Valkey Source to our Valkey cluster sink (transform). ```yaml --- sources: - - Redis: - name: "redis" + - Valkey: + name: "valkey" listen_addr: "0.0.0.0:6380" chain: - - RedisClusterPortsRewrite: + - ValkeyClusterPortsRewrite: new_port: 6380 - - RedisSinkSingle: + - ValkeySinkSingle: remote_address: "0.0.0.0:6379" connect_timeout_ms: 3000 ``` Modify an existing `topology.yaml` or create a new one and place the above example as the file's contents. -You will also need a [config.yaml](https://raw.githubusercontent.com/shotover/shotover-examples/main/redis-cluster-1-1/config.yaml) to run Shotover. +You will also need a [config.yaml](https://raw.githubusercontent.com/shotover/shotover-examples/main/valkey-cluster-1-1/config.yaml) to run Shotover. 
```shell -curl -L https://raw.githubusercontent.com/shotover/shotover-examples/main/redis-cluster-1-1/config.yaml --output config.yaml +curl -L https://raw.githubusercontent.com/shotover/shotover-examples/main/valkey-cluster-1-1/config.yaml --output config.yaml ``` ## Starting @@ -90,13 +90,13 @@ docker-compose up -d With everything now up and running, we can test out our client application. Let's start it up! -First we will run `redis-benchmark` directly on our cluster. +First we will run `valkey-benchmark` directly on our cluster. ```console -redis-benchmark -h 172.16.1.2 -p 6379 -t set,get --cluster +valkey-benchmark -h 172.16.1.2 -p 6379 -t set,get --cluster ``` -If everything works correctly you should see the following, along with the benchmark results which have been omitted for brevity. Notice all traffic is going through the Redis port on `6379`. +If everything works correctly you should see the following, along with the benchmark results which have been omitted for brevity. Notice all traffic is going through the Valkey port on `6379`. ```console Cluster has 3 master nodes: @@ -109,10 +109,10 @@ Master 2: 04b301f1b165d81d5fb86e50312e9cc4898cbcce 172.16.1.4:6379 Now run it again but on the Shotover port this time. ```console -redis-benchmark -h 172.16.1.2 -p 6380 -t set,get --cluster +valkey-benchmark -h 172.16.1.2 -p 6380 -t set,get --cluster ``` -You should see the following, notice that all traffic is going through Shotover on `6380` instead of the Redis port of `6379`: +You should see the following, notice that all traffic is going through Shotover on `6380` instead of the Valkey port of `6379`: ```console Cluster has 3 master nodes: diff --git a/docs/src/examples/redis-clustering-unaware.md b/docs/src/examples/redis-clustering-unaware.md index e80498caf..140d1a108 100644 --- a/docs/src/examples/redis-clustering-unaware.md +++ b/docs/src/examples/redis-clustering-unaware.md @@ -1,21 +1,21 @@ -# Redis Clustering +# Valkey Clustering -The following guide shows you how to configure Shotover Proxy to support transparently proxying Redis cluster _unaware_ clients to a [Redis cluster](https://redis.io/topics/cluster-spec). +The following guide shows you how to configure Shotover Proxy to support transparently proxying Valkey cluster _unaware_ clients to a [Valkey cluster](https://valkey.io/topics/cluster-spec). ## General Configuration -First you need to setup a Redis cluster and Shotover. +First you need to setup a Valkey cluster and Shotover. -The easiest way to do this is with this example [docker-compose.yaml](https://github.com/shotover/shotover-examples/blob/main/redis-cluster-1-many/docker-compose.yaml) +The easiest way to do this is with this example [docker-compose.yaml](https://github.com/shotover/shotover-examples/blob/main/valkey-cluster-1-many/docker-compose.yaml) You should first inspect the `docker-compose.yaml` to understand what the cluster looks like and how its exposed to the network. Then run: ```shell -curl -L https://raw.githubusercontent.com/shotover/shotover-examples/main/redis-cluster-1-many/docker-compose.yaml --output docker-compose.yaml +curl -L https://raw.githubusercontent.com/shotover/shotover-examples/main/valkey-cluster-1-many/docker-compose.yaml --output docker-compose.yaml ``` -Alternatively you could spin up a hosted Redis cluster on [any cloud provider that provides it](https://www.instaclustr.com/products/managed-redis). 
+Alternatively you could spin up a hosted Valkey cluster on [any cloud provider that provides it](https://www.instaclustr.com/products/managed-valkey). This more accurately reflects a real production use but will take a bit more setup. And reduce the docker-compose.yaml to just the shotover part @@ -44,13 +44,13 @@ networks: ```yaml --- sources: - - Redis: - name: "redis" - # define where shotover listens for incoming connections from our client application (`redis-benchmark`). + - Valkey: + name: "valkey" + # define where shotover listens for incoming connections from our client application (`valkey-benchmark`). listen_addr: "0.0.0.0:6379" chain: - # configure Shotover to connect to the Redis cluster via our defined contact points - - RedisSinkCluster: + # configure Shotover to connect to the Valkey cluster via our defined contact points + - ValkeySinkCluster: first_contact_points: - "172.16.1.2:6379" - "172.16.1.3:6379" @@ -63,12 +63,12 @@ sources: Modify an existing `topology.yaml` or create a new one and place the above example as the file's contents. -If you didnt use the standard `docker-compose.yaml` setup then you will need to change `first_contact_points` to point to the Redis instances you used. +If you didnt use the standard `docker-compose.yaml` setup then you will need to change `first_contact_points` to point to the Valkey instances you used. -You will also need a [config.yaml](https://raw.githubusercontent.com/shotover/shotover-examples/main/redis-cluster-1-1/config.yaml) to run Shotover. +You will also need a [config.yaml](https://raw.githubusercontent.com/shotover/shotover-examples/main/valkey-cluster-1-1/config.yaml) to run Shotover. ```shell -curl -L https://raw.githubusercontent.com/shotover/shotover-examples/main/redis-cluster-1-1/config.yaml --output config.yaml +curl -L https://raw.githubusercontent.com/shotover/shotover-examples/main/valkey-cluster-1-1/config.yaml --output config.yaml ``` ## Starting @@ -81,107 +81,8 @@ docker-compose up -d ## Testing -With your Redis Cluster and Shotover now up and running, we can test out our client application. Let's start it up! +With your Valkey Cluster and Shotover now up and running, we can test out our client application. Let's start it up! 
```console -redis-benchmark -h 172.16.1.9 -t set,get -``` - -Running against local containerised Redis instances on a Ryzen 9 3900X we get the following: - -```console -user@demo ~$ redis-benchmark -t set,get -====== SET ====== - 100000 requests completed in 0.69 seconds - 50 parallel clients - 3 bytes payload - keep alive: 1 - host configuration "save": - host configuration "appendonly": - multi-thread: no - -Latency by percentile distribution: -0.000% <= 0.079 milliseconds (cumulative count 2) -50.000% <= 0.215 milliseconds (cumulative count 51352) -75.000% <= 0.231 milliseconds (cumulative count 79466) -87.500% <= 0.247 milliseconds (cumulative count 91677) -93.750% <= 0.255 milliseconds (cumulative count 94319) -96.875% <= 0.271 milliseconds (cumulative count 97011) -98.438% <= 0.303 milliseconds (cumulative count 98471) -99.219% <= 0.495 milliseconds (cumulative count 99222) -99.609% <= 0.615 milliseconds (cumulative count 99613) -99.805% <= 0.719 milliseconds (cumulative count 99806) -99.902% <= 0.791 milliseconds (cumulative count 99908) -99.951% <= 0.919 milliseconds (cumulative count 99959) -99.976% <= 0.967 milliseconds (cumulative count 99976) -99.988% <= 0.991 milliseconds (cumulative count 99992) -99.994% <= 1.007 milliseconds (cumulative count 99995) -99.997% <= 1.015 milliseconds (cumulative count 99998) -99.998% <= 1.023 milliseconds (cumulative count 99999) -99.999% <= 1.031 milliseconds (cumulative count 100000) -100.000% <= 1.031 milliseconds (cumulative count 100000) - -Cumulative distribution of latencies: -0.007% <= 0.103 milliseconds (cumulative count 7) -33.204% <= 0.207 milliseconds (cumulative count 33204) -98.471% <= 0.303 milliseconds (cumulative count 98471) -99.044% <= 0.407 milliseconds (cumulative count 99044) -99.236% <= 0.503 milliseconds (cumulative count 99236) -99.571% <= 0.607 milliseconds (cumulative count 99571) -99.793% <= 0.703 milliseconds (cumulative count 99793) -99.926% <= 0.807 milliseconds (cumulative count 99926) -99.949% <= 0.903 milliseconds (cumulative count 99949) -99.995% <= 1.007 milliseconds (cumulative count 99995) -100.000% <= 1.103 milliseconds (cumulative count 100000) - -Summary: - throughput summary: 144092.22 requests per second - latency summary (msec): - avg min p50 p95 p99 max - 0.222 0.072 0.215 0.263 0.391 1.031 -====== GET ====== - 100000 requests completed in 0.69 seconds - 50 parallel clients - 3 bytes payload - keep alive: 1 - host configuration "save": - host configuration "appendonly": - multi-thread: no - -Latency by percentile distribution: -0.000% <= 0.079 milliseconds (cumulative count 1) -50.000% <= 0.215 milliseconds (cumulative count 64586) -75.000% <= 0.223 milliseconds (cumulative count 77139) -87.500% <= 0.239 milliseconds (cumulative count 90521) -93.750% <= 0.255 milliseconds (cumulative count 94985) -96.875% <= 0.287 milliseconds (cumulative count 97262) -98.438% <= 0.311 milliseconds (cumulative count 98588) -99.219% <= 0.367 milliseconds (cumulative count 99232) -99.609% <= 0.495 milliseconds (cumulative count 99613) -99.805% <= 0.583 milliseconds (cumulative count 99808) -99.902% <= 0.631 milliseconds (cumulative count 99913) -99.951% <= 0.647 milliseconds (cumulative count 99955) -99.976% <= 0.663 milliseconds (cumulative count 99978) -99.988% <= 0.679 milliseconds (cumulative count 99990) -99.994% <= 0.703 milliseconds (cumulative count 99995) -99.997% <= 0.711 milliseconds (cumulative count 99997) -99.998% <= 0.751 milliseconds (cumulative count 99999) -99.999% <= 0.775 milliseconds (cumulative 
count 100000) -100.000% <= 0.775 milliseconds (cumulative count 100000) - -Cumulative distribution of latencies: -0.009% <= 0.103 milliseconds (cumulative count 9) -48.520% <= 0.207 milliseconds (cumulative count 48520) -98.179% <= 0.303 milliseconds (cumulative count 98179) -99.358% <= 0.407 milliseconds (cumulative count 99358) -99.626% <= 0.503 milliseconds (cumulative count 99626) -99.867% <= 0.607 milliseconds (cumulative count 99867) -99.995% <= 0.703 milliseconds (cumulative count 99995) -100.000% <= 0.807 milliseconds (cumulative count 100000) - -Summary: - throughput summary: 143884.89 requests per second - latency summary (msec): - avg min p50 p95 p99 max - 0.214 0.072 0.215 0.263 0.335 0.775 +valkey-benchmark -h 172.16.1.9 -t set,get ``` diff --git a/docs/src/user-guide/getting-started.md b/docs/src/user-guide/getting-started.md index b4055883a..cf495a5ee 100644 --- a/docs/src/user-guide/getting-started.md +++ b/docs/src/user-guide/getting-started.md @@ -17,4 +17,4 @@ To see Shotover's command line arguments run: `./shotover-proxy --help` Full `topology.yaml` examples configured for a specific use case: -* [Redis clustering](../examples/redis-clustering-unaware.md) +* [valkey clustering](../examples/valkey-clustering-unaware.md) diff --git a/docs/src/user-guide/introduction.md b/docs/src/user-guide/introduction.md index fd1e79e48..5c473af0b 100644 --- a/docs/src/user-guide/introduction.md +++ b/docs/src/user-guide/introduction.md @@ -22,7 +22,7 @@ Shotover aims to make these challenges simpler by providing a point where data l Longer term, Shotover can also leverage the same capability to make operational tasks easier to solve a number of other challenges that come with working multiple databases. Some of these include: * Data encryption at the field level, with a common key management scheme between databases. -* Routing the same data to databases that provide different query capabilities or performance characteristics (e.g. indexing data in Redis in Elasticsearch, easy caching of DynamoDB data in Redis). +* Routing the same data to databases that provide different query capabilities or performance characteristics (e.g. indexing data in Valkey in Elasticsearch, easy caching of DynamoDB data in Valkey). * Routing/replicating data across regions for databases that don't support it natively or the functionality is gated behind proprietary "open-core" implementations. * A common audit and AuthZ/AuthN point for SOX/PCI/HIPAA compliance. @@ -38,18 +38,11 @@ Shotover prioritises the following principals in the order listed: Shotover provides a set of predefined transforms that can modify, route and control queries from any number of sources to a similar number of sinks. As the user you can construct chains of these transforms to achieve the behaviour required. Each chain can then be attached to a "source" that speaks the native protocol of you chosen database. The transform chain will process each request with access to a unified/simplified representation of a generic query, the original raw query and optionally (for SQL like protocols) a parsed AST representing the query. 
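For example, the cluster-aware Valkey guide earlier in this patch expresses exactly this pattern: one `Valkey` source whose chain rewrites cluster ports and then hands requests to a single-node sink. A minimal sketch of that topology (copied from the example above, not new configuration) looks like:

```yaml
---
sources:
  - Valkey:
      name: "valkey"
      listen_addr: "0.0.0.0:6380"
      chain:
        - ValkeyClusterPortsRewrite:
            new_port: 6380
        - ValkeySinkSingle:
            remote_address: "0.0.0.0:6379"
            connect_timeout_ms: 3000
```

Requests enter at the source and flow through the chain in order, with the final sink transform forwarding them to the Valkey node.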
- - Shotover proxy currently supports the following protocols as sources: -* Cassandra (CQLv4) -* Redis (RESP2) +* Cassandra (CQL4 + CQL5) +* Valkey/Redis (RESP2) +* Kafka (Kafka Wire Protocol) ## Shotover performance diff --git a/shotover-proxy/tests/test-configs/cassandra/valkey-cache/docker-compose.yaml b/shotover-proxy/tests/test-configs/cassandra/valkey-cache/docker-compose.yaml index fc0ee6f37..a09aaa327 100644 --- a/shotover-proxy/tests/test-configs/cassandra/valkey-cache/docker-compose.yaml +++ b/shotover-proxy/tests/test-configs/cassandra/valkey-cache/docker-compose.yaml @@ -1,8 +1,12 @@ services: - redis-one: - image: library/redis:5.0.9 + valkey-one: + image: bitnami/valkey:7.2.5-debian-12-r9 ports: - "6379:6379" + environment: + ALLOW_EMPTY_PASSWORD: "yes" + VALKEY_TLS_ENABLED: "no" + cassandra-one: image: shotover/cassandra-test:4.0.6-r1 ports: diff --git a/shotover-proxy/tests/test-configs/log-to-file/docker-compose.yaml b/shotover-proxy/tests/test-configs/log-to-file/docker-compose.yaml index 7acef8da1..f8891e78b 100644 --- a/shotover-proxy/tests/test-configs/log-to-file/docker-compose.yaml +++ b/shotover-proxy/tests/test-configs/log-to-file/docker-compose.yaml @@ -1,5 +1,8 @@ services: - redis-one: - image: library/redis:5.0.9 + valkey-one: + image: bitnami/valkey:7.2.5-debian-12-r9 ports: - "1111:6379" + environment: + ALLOW_EMPTY_PASSWORD: "yes" + VALKEY_TLS_ENABLED: "no" diff --git a/shotover-proxy/tests/transforms/log_to_file.rs b/shotover-proxy/tests/transforms/log_to_file.rs index 55cd96085..76ab14462 100644 --- a/shotover-proxy/tests/transforms/log_to_file.rs +++ b/shotover-proxy/tests/transforms/log_to_file.rs @@ -20,20 +20,14 @@ async fn log_to_file() { "*4\r\n$6\r\nCLIENT\r\n$7\r\nSETINFO\r\n$8\r\nLIB-NAME\r\n$8\r\nredis-rs\r\n", ); let response = std::fs::read("message-log/1/responses/message1.bin").unwrap(); - assert_eq_string( - &response, - "-ERR Unknown subcommand or wrong number of arguments for 'SETINFO'. Try CLIENT HELP\r\n", - ); + assert_eq_string(&response, "+OK\r\n"); let request = std::fs::read("message-log/1/requests/message2.bin").unwrap(); assert_eq_string( &request, "*4\r\n$6\r\nCLIENT\r\n$7\r\nSETINFO\r\n$7\r\nLIB-VER\r\n$6\r\n0.24.0\r\n", ); let response = std::fs::read("message-log/1/responses/message2.bin").unwrap(); - assert_eq_string( - &response, - "-ERR Unknown subcommand or wrong number of arguments for 'SETINFO'. Try CLIENT HELP\r\n", - ); + assert_eq_string(&response, "+OK\r\n"); // SET sent by command assert_ok(redis::cmd("SET").arg("foo").arg(42), &mut connection).await; diff --git a/shotover-proxy/tests/valkey_int_tests/basic_driver_tests.rs b/shotover-proxy/tests/valkey_int_tests/basic_driver_tests.rs index b0f16acf7..f9e3722a5 100644 --- a/shotover-proxy/tests/valkey_int_tests/basic_driver_tests.rs +++ b/shotover-proxy/tests/valkey_int_tests/basic_driver_tests.rs @@ -1292,9 +1292,6 @@ pub async fn test_trigger_transform_failure_driver(client: &RedisClient) { } /// A raw variant of this test case is provided so that we can make a strong assertion about the way shotover handles this case. -/// -/// CAREFUL: This lacks any kind of check that shotover is ready, -/// so make sure shotover_manager.redis_connection is run on 6379 before calling this. 
pub async fn test_trigger_transform_failure_raw() { // Send invalid valkey command // To correctly handle this shotover should close the connection @@ -1305,7 +1302,7 @@ pub async fn test_trigger_transform_failure_raw() { connection.write_all(b"*1\r\n$4\r\nping\r\n").await.unwrap(); assert_eq!( - read_redis_message(&mut connection).await, + read_valkey_message(&mut connection).await, ValkeyFrame::Error(format!("ERR Internal shotover (or custom transform) bug: Chain failed to send and/or receive messages, the connection will now be closed. Caused by: 0: ValkeySinkSingle transform failed 1: Failed to connect to destination 127.0.0.1:1111 2: Connection refused (os error {CONNECTION_REFUSED_OS_ERROR})").into()) ); @@ -1319,7 +1316,7 @@ pub async fn test_trigger_transform_failure_raw() { assert_eq!(amount, 0); } -async fn read_redis_message(connection: &mut TcpStream) -> ValkeyFrame { +async fn read_valkey_message(connection: &mut TcpStream) -> ValkeyFrame { let mut buffer = BytesMut::new(); loop { if let Ok(Some((result, len))) = @@ -1336,8 +1333,6 @@ async fn read_redis_message(connection: &mut TcpStream) -> ValkeyFrame { } } -/// CAREFUL: This lacks any kind of check that shotover is ready, -/// so make sure shotover_manager.redis_connection is run on 6379 before calling this. pub async fn test_invalid_frame() { // Send invalid valkey command // To correctly handle this shotover should close the connection @@ -1346,7 +1341,7 @@ pub async fn test_invalid_frame() { .unwrap(); connection - .write_all(b"invalid_redis_frame\r\n") + .write_all(b"invalid_valkey_frame\r\n") .await .unwrap(); diff --git a/shotover-proxy/tests/valkey_int_tests/mod.rs b/shotover-proxy/tests/valkey_int_tests/mod.rs index a4eaaa0a7..30d3e4689 100644 --- a/shotover-proxy/tests/valkey_int_tests/mod.rs +++ b/shotover-proxy/tests/valkey_int_tests/mod.rs @@ -50,7 +50,7 @@ async fn passthrough_standard() { } #[tokio::test(flavor = "multi_thread")] -async fn passthrough_redis_down() { +async fn passthrough_valkey_down() { let shotover = shotover_process("tests/test-configs/valkey/passthrough/topology.yaml") .start() .await; diff --git a/shotover/src/frame/mod.rs b/shotover/src/frame/mod.rs index 71f7317ea..098a4a59b 100644 --- a/shotover/src/frame/mod.rs +++ b/shotover/src/frame/mod.rs @@ -60,7 +60,7 @@ impl MessageType { #[cfg(feature = "cassandra")] MessageType::Cassandra => "cql", #[cfg(feature = "valkey")] - MessageType::Valkey => "redis", + MessageType::Valkey => "valkey", #[cfg(feature = "kafka")] MessageType::Kafka => "kafka", #[cfg(feature = "opensearch")] diff --git a/shotover/src/lib.rs b/shotover/src/lib.rs index 63037e559..04dcbfe72 100644 --- a/shotover/src/lib.rs +++ b/shotover/src/lib.rs @@ -57,7 +57,7 @@ If we absolutely need unsafe code, it should be isolated within a separate small not(feature = "opensearch"), ))] compile_error!( - "At least one protocol feature must be enabled, e.g. `cassandra`, `redis`, `kafka` or `opensearch`" + "At least one protocol feature must be enabled, e.g. 
`cassandra`, `redis`, `kafka` or `opensearch`"
+    "At least one protocol feature must be enabled, e.g. `cassandra`, `valkey`, `kafka` or `opensearch`"
 );
 
 pub mod codec;

From 43c0e0cb1ee629d3de744cc3d0b4d79c397c3a29 Mon Sep 17 00:00:00 2001
From: Lucas Kent
Date: Thu, 28 Nov 2024 16:22:22 +1100
Subject: [PATCH 2/2] rename example files

---
 .../{redis-clustering-aware.md => valkey-clustering-aware.md}     | 0
 .../{redis-clustering-unaware.md => valkey-clustering-unaware.md} | 0
 2 files changed, 0 insertions(+), 0 deletions(-)
 rename docs/src/examples/{redis-clustering-aware.md => valkey-clustering-aware.md} (100%)
 rename docs/src/examples/{redis-clustering-unaware.md => valkey-clustering-unaware.md} (100%)

diff --git a/docs/src/examples/redis-clustering-aware.md b/docs/src/examples/valkey-clustering-aware.md
similarity index 100%
rename from docs/src/examples/redis-clustering-aware.md
rename to docs/src/examples/valkey-clustering-aware.md
diff --git a/docs/src/examples/redis-clustering-unaware.md b/docs/src/examples/valkey-clustering-unaware.md
similarity index 100%
rename from docs/src/examples/redis-clustering-unaware.md
rename to docs/src/examples/valkey-clustering-unaware.md
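To smoke-test the renamed examples end to end, the commands already shown in the cluster-aware guide can be run in sequence. This is only a sketch: it assumes Docker Compose and `valkey-benchmark` are available locally and that the `topology.yaml` from that guide has been created in the working directory.

```console
curl -L https://raw.githubusercontent.com/shotover/shotover-examples/main/valkey-cluster-1-1/docker-compose.yaml --output docker-compose.yaml
curl -L https://raw.githubusercontent.com/shotover/shotover-examples/main/valkey-cluster-1-1/config.yaml --output config.yaml
docker-compose up -d
valkey-benchmark -h 172.16.1.2 -p 6380 -t set,get --cluster
```

All traffic should then be reported against the Shotover port `6380` rather than the Valkey port `6379`, matching the expected output shown in the guide.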