diff --git a/.github/workflows/node.yml b/.github/workflows/node.yml index 1ed9488e01..a91a853534 100644 --- a/.github/workflows/node.yml +++ b/.github/workflows/node.yml @@ -250,10 +250,10 @@ jobs: - uses: actions/checkout@v4 - - name: Use Node.js 16.x + - name: Install Node.js uses: actions/setup-node@v4 with: - node-version: 16.x + node-version: 20.x - name: Build Node wrapper uses: ./.github/workflows/build-node-wrapper diff --git a/CHANGELOG.md b/CHANGELOG.md index 4d809b3801..00f5925e21 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,6 @@ #### Changes +* Node: alias commands added: FT.ALIASADD, FT.ALIASDEL, FT.ALIASUPDATE ([#2596](https://github.com/valkey-io/valkey-glide/pull/2596)) +* Python code cleanup ([#2573](https://github.com/valkey-io/valkey-glide/pull/2573)) * Python: Python: FT.PROFILE command added ([#2543](https://github.com/valkey-io/valkey-glide/pull/2543)) * Python: Python: FT.AGGREGATE command added([#2530](https://github.com/valkey-io/valkey-glide/pull/2530)) * Python: Add JSON.OBJLEN command ([#2495](https://github.com/valkey-io/valkey-glide/pull/2495)) @@ -14,6 +16,7 @@ * Python: Add JSON.NUMINCRBY command ([#2448](https://github.com/valkey-io/valkey-glide/pull/2448)) * Python: Add JSON.NUMMULTBY command ([#2458](https://github.com/valkey-io/valkey-glide/pull/2458)) * Python: Add JSON.ARRINDEX command ([#2528](https://github.com/valkey-io/valkey-glide/pull/2528)) +* Python: Add `FT._LIST` command ([#2571](https://github.com/valkey-io/valkey-glide/pull/2571)) * Python: Add `JSON.DEBUG_MEMORY` and `JSON.DEBUG_FIELDS` commands ([#2481](https://github.com/valkey-io/valkey-glide/pull/2481)) * Java: Added `FT.CREATE` ([#2414](https://github.com/valkey-io/valkey-glide/pull/2414)) * Java: Added `FT.INFO` ([#2405](https://github.com/valkey-io/valkey-glide/pull/2441)) @@ -24,12 +27,14 @@ * Java: Added `JSON.SET` and `JSON.GET` ([#2462](https://github.com/valkey-io/valkey-glide/pull/2462)) * Node: Added `FT.CREATE` 
([#2501](https://github.com/valkey-io/valkey-glide/pull/2501)) * Node: Added `FT.INFO` ([#2540](https://github.com/valkey-io/valkey-glide/pull/2540)) +* Node: Added `FT.AGGREGATE` ([#2554](https://github.com/valkey-io/valkey-glide/pull/2554)) * Java: Added `JSON.DEBUG` ([#2520](https://github.com/valkey-io/valkey-glide/pull/2520)) * Java: Added `JSON.ARRINSERT` and `JSON.ARRLEN` ([#2476](https://github.com/valkey-io/valkey-glide/pull/2476)) * Java: Added `JSON.ARRPOP` ([#2486](https://github.com/valkey-io/valkey-glide/pull/2486)) * Java: Added `JSON.OBJLEN` and `JSON.OBJKEYS` ([#2492](https://github.com/valkey-io/valkey-glide/pull/2492)) * Java: Added `JSON.DEL` and `JSON.FORGET` ([#2490](https://github.com/valkey-io/valkey-glide/pull/2490)) * Java: Added `FT.ALIASADD`, `FT.ALIASDEL`, `FT.ALIASUPDATE` ([#2442](https://github.com/valkey-io/valkey-glide/pull/2442)) +* Java: Added `FT._ALIASLIST` ([#2569](https://github.com/valkey-io/valkey-glide/pull/2569)) * Java: Added `FT.EXPLAIN`, `FT.EXPLAINCLI` ([#2515](https://github.com/valkey-io/valkey-glide/pull/2515)) * Core: Update routing for commands from server modules ([#2461](https://github.com/valkey-io/valkey-glide/pull/2461)) * Node: Added `JSON.SET` and `JSON.GET` ([#2427](https://github.com/valkey-io/valkey-glide/pull/2427)) @@ -43,11 +48,19 @@ * Java: Added `JSON.STRAPPEND` and `JSON.STRLEN` ([#2522](https://github.com/valkey-io/valkey-glide/pull/2522)) * Java: Added `JSON.CLEAR` ([#2519](https://github.com/valkey-io/valkey-glide/pull/2519)) * Node: Added `JSON.TYPE` ([#2510](https://github.com/valkey-io/valkey-glide/pull/2510)) +* Node: Added `JSON.ARRAPPEND` ([#2562](https://github.com/valkey-io/valkey-glide/pull/2562)) * Java: Added `JSON.RESP` ([#2513](https://github.com/valkey-io/valkey-glide/pull/2513)) * Java: Added `JSON.TYPE` ([#2525](https://github.com/valkey-io/valkey-glide/pull/2525)) +* Java: Added `FT._LIST` ([#2568](https://github.com/valkey-io/valkey-glide/pull/2568)) * Node: Added 
`FT.DROPINDEX` ([#2516](https://github.com/valkey-io/valkey-glide/pull/2516)) +* Node: Added `FT._LIST` ([#2570](https://github.com/valkey-io/valkey-glide/pull/2570)) * Node: Added `JSON.RESP` ([#2517](https://github.com/valkey-io/valkey-glide/pull/2517)) +* Node: Added `FT.EXPLAIN` and `FT.EXPLAINCLI` ([#2560](https://github.com/valkey-io/valkey-glide/pull/2560)) +* Node: Added `JSON.CLEAR` ([#2566](https://github.com/valkey-io/valkey-glide/pull/2566)) * Node: Added `JSON.ARRTRIM` ([#2550](https://github.com/valkey-io/valkey-glide/pull/2550)) +* Python: Add `JSON.STRAPPEND`, `JSON.STRLEN` commands ([#2372](https://github.com/valkey-io/valkey-glide/pull/2372)) +* Node: Added `JSON.ARRINDEX` ([#2559](https://github.com/valkey-io/valkey-glide/pull/2559)) +* Node: Added `JSON.OBJLEN` and `JSON.OBJKEYS` ([#2563](https://github.com/valkey-io/valkey-glide/pull/2563)) * Python: Add `JSON.STRAPPEND` , `JSON.STRLEN` commands ([#2372](https://github.com/valkey-io/valkey-glide/pull/2372)) * Python: Add `JSON.OBJKEYS` command ([#2395](https://github.com/valkey-io/valkey-glide/pull/2395)) * Python: Add `JSON.ARRINSERT` command ([#2464](https://github.com/valkey-io/valkey-glide/pull/2464)) @@ -58,6 +71,9 @@ * Node: Add `JSON.STRLEN` and `JSON.STRAPPEND` command ([#2537](https://github.com/valkey-io/valkey-glide/pull/2537)) * Node: Add `FT.SEARCH` ([#2551](https://github.com/valkey-io/valkey-glide/pull/2551)) * Python: Fix example ([#2556](https://github.com/valkey-io/valkey-glide/issues/2556)) +* Core: Add support for sending multi-slot JSON.MSET and JSON.MGET commands ([#2587](https://github.com/valkey-io/valkey-glide/pull/2587)) +* Node: Add `JSON.DEBUG` command ([#2572](https://github.com/valkey-io/valkey-glide/pull/2572)) +* Node: Add `JSON.NUMINCRBY` and `JSON.NUMMULTBY` command ([#2555](https://github.com/valkey-io/valkey-glide/pull/2555)) #### Breaking Changes diff --git a/glide-core/redis-rs/redis/src/cluster.rs b/glide-core/redis-rs/redis/src/cluster.rs index 
170cac47b3..2846fc1137 100644 --- a/glide-core/redis-rs/redis/src/cluster.rs +++ b/glide-core/redis-rs/redis/src/cluster.rs @@ -572,7 +572,7 @@ where let mut slots = self.slots.borrow_mut(); let results = match &routing { - MultipleNodeRoutingInfo::MultiSlot(routes) => { + MultipleNodeRoutingInfo::MultiSlot((routes, _)) => { self.execute_multi_slot(input, &mut slots, &mut connections, routes) } MultipleNodeRoutingInfo::AllMasters => { @@ -648,10 +648,11 @@ where .map(|res| res.map(|(_, val)| val)) .collect::>>()?; match routing { - MultipleNodeRoutingInfo::MultiSlot(vec) => { + MultipleNodeRoutingInfo::MultiSlot((vec, args_pattern)) => { crate::cluster_routing::combine_and_sort_array_results( results, - vec.iter().map(|(_, indices)| indices), + &vec, + &args_pattern, ) } _ => crate::cluster_routing::combine_array_results(results), diff --git a/glide-core/redis-rs/redis/src/cluster_async/mod.rs b/glide-core/redis-rs/redis/src/cluster_async/mod.rs index 5deba4b534..b1f9d6a560 100644 --- a/glide-core/redis-rs/redis/src/cluster_async/mod.rs +++ b/glide-core/redis-rs/redis/src/cluster_async/mod.rs @@ -1435,10 +1435,11 @@ where future::try_join_all(receivers.into_iter().map(get_receiver)) .await .and_then(|results| match routing { - MultipleNodeRoutingInfo::MultiSlot(vec) => { + MultipleNodeRoutingInfo::MultiSlot((vec, args_pattern)) => { crate::cluster_routing::combine_and_sort_array_results( results, - vec.iter().map(|(_, indices)| indices), + vec, + args_pattern, ) } _ => crate::cluster_routing::combine_array_results(results), @@ -1889,7 +1890,7 @@ where .all_primary_connections() .map(|tuple| Some((cmd.clone(), tuple))), ), - MultipleNodeRoutingInfo::MultiSlot(slots) => { + MultipleNodeRoutingInfo::MultiSlot((slots, _)) => { into_channels(slots.iter().map(|(route, indices)| { connections_container .connection_for_route(route) diff --git a/glide-core/redis-rs/redis/src/cluster_routing.rs b/glide-core/redis-rs/redis/src/cluster_routing.rs index dcd02e6046..fa40a5d1a7 
100644 --- a/glide-core/redis-rs/redis/src/cluster_routing.rs +++ b/glide-core/redis-rs/redis/src/cluster_routing.rs @@ -5,7 +5,8 @@ use std::collections::HashMap; use crate::cluster_topology::get_slot; use crate::cmd::{Arg, Cmd}; use crate::types::Value; -use crate::{ErrorKind, RedisResult}; +use crate::{ErrorKind, RedisError, RedisResult}; +use std::borrow::Cow; use std::iter::Once; #[derive(Clone)] @@ -95,8 +96,13 @@ pub enum MultipleNodeRoutingInfo { AllNodes, /// Route to all primaries in the cluster AllMasters, - /// Instructions for how to split a multi-slot command (e.g. MGET, MSET) into sub-commands. Each tuple is the route for each subcommand, and the indices of the arguments from the original command that should be copied to the subcommand. - MultiSlot(Vec<(Route, Vec)>), + /// Routes the request to multiple slots. + /// This variant contains instructions for splitting a multi-slot command (e.g., MGET, MSET) into sub-commands. + /// Each tuple consists of a `Route` representing the target node for the subcommand, + /// and a vector of argument indices from the original command that should be copied to each subcommand. + /// The `MultiSlotArgPattern` specifies the pattern of the command’s arguments, indicating how they are organized + /// (e.g., only keys, key-value pairs, etc). + MultiSlot((Vec<(Route, Vec)>, MultiSlotArgPattern)), } /// Takes a routable and an iterator of indices, which is assued to be created from`MultipleNodeRoutingInfo::MultiSlot`, @@ -243,14 +249,123 @@ pub fn combine_array_results(values: Vec) -> RedisResult { Ok(Value::Array(results)) } -/// Combines multiple call results in the `values` field, each assume to be an array of results, -/// into a single array. `sorting_order` defines the order of the results in the returned array - -/// for each array of results, `sorting_order` should contain a matching array with the indices of -/// the results in the final array. 
-pub(crate) fn combine_and_sort_array_results<'a>( +// An iterator that yields `Cow<[usize]>` representing grouped result indices according to a specified argument pattern. +// This type is used to combine multi-slot array responses. +type MultiSlotResIdxIter<'a> = std::iter::Map< + std::slice::Iter<'a, (Route, Vec)>, + fn(&'a (Route, Vec)) -> Cow<'a, [usize]>, +>; + +/// Generates an iterator that yields a vector of result indices for each slot within the final merged results array for a multi-slot command response. +/// The indices are calculated based on the `args_pattern` and the positions of the arguments for each slot-specific request in the original multi-slot request, +/// ensuring that the results are ordered according to the structure of the initial multi-slot command. +/// +/// # Arguments +/// * `route_arg_indices` - A reference to a vector where each element is a tuple containing a route and +/// the corresponding argument indices for that route. +/// * `args_pattern` - Specifies the argument pattern (e.g., `KeysOnly`, `KeyValuePairs`, ..), which defines how the indices are grouped for each slot. +/// +/// # Returns +/// An iterator yielding `Cow<[usize]>` with the grouped result indices based on the specified argument pattern. +/// +/// For example, given the command `MSET foo bar foo2 bar2 {foo}foo3 bar3` with the `KeyValuePairs` pattern: +/// - `route_arg_indices` would include: +/// - Slot of "foo" with argument indices `[0, 1, 4, 5]` (where `{foo}foo3` hashes to the same slot as "foo" due to curly braces). +/// - Slot of "foo2" with argument indices `[2, 3]`. +/// - Using the `KeyValuePairs` pattern, each key-value pair contributes a single response, yielding three responses total. +/// - Therefore, the iterator generated by this function would yield grouped result indices as follows: +/// - Slot "foo" is mapped to `[0, 2]` in the final result order. +/// - Slot "foo2" is mapped to `[1]`. 
+fn calculate_multi_slot_result_indices<'a>( + route_arg_indices: &'a [(Route, Vec)], + args_pattern: &MultiSlotArgPattern, +) -> RedisResult> { + let check_indices_input = |step_count: usize| { + for (_, indices) in route_arg_indices { + if indices.len() % step_count != 0 { + return Err(RedisError::from(( + ErrorKind::ClientError, + "Invalid indices input detected", + format!( + "Expected argument pattern with tuples of size {step_count}, but found indices: {indices:?}" + ), + ))); + } + } + Ok(()) + }; + + match args_pattern { + MultiSlotArgPattern::KeysOnly => Ok(route_arg_indices + .iter() + .map(|(_, indices)| Cow::Borrowed(indices))), + MultiSlotArgPattern::KeysAndLastArg => { + // The last index corresponds to the path, skip it + Ok(route_arg_indices + .iter() + .map(|(_, indices)| Cow::Borrowed(&indices[..indices.len() - 1]))) + } + MultiSlotArgPattern::KeyWithTwoArgTriples => { + // For each triplet (key, path, value) we receive a single response. + // For example, for argument indices: [(_, [0,1,2]), (_, [3,4,5,9,10,11]), (_, [6,7,8])] + // The resulting grouped indices would be: [0], [1, 3], [2] + check_indices_input(3)?; + Ok(route_arg_indices.iter().map(|(_, indices)| { + Cow::Owned( + indices + .iter() + .step_by(3) + .map(|idx| idx / 3) + .collect::>(), + ) + })) + } + MultiSlotArgPattern::KeyValuePairs => + // For each pair (key, value) we receive a single response. + // For example, for argument indices: [(_, [0,1]), (_, [2,3,6,7]), (_, [4,5])] + // The resulting grouped indices would be: [0], [1, 3], [2] + { + check_indices_input(2)?; + Ok(route_arg_indices.iter().map(|(_, indices)| { + Cow::Owned( + indices + .iter() + .step_by(2) + .map(|idx| idx / 2) + .collect::>(), + ) + })) + } + } +} + +/// Merges the results of a multi-slot command from the `values` field, where each entry is expected to be an array of results. +/// The combined results are ordered according to the sequence in which they appeared in the original command. 
+/// +/// # Arguments +/// +/// * `values` - A vector of `Value`s, where each `Value` is expected to be an array representing results +/// from separate slots in a multi-slot command. Each `Value::Array` within `values` corresponds to +/// the results associated with a specific slot, as indicated by `route_arg_indices`. +/// +/// * `route_arg_indices` - A reference to a vector of tuples, where each tuple represents a route and a vector of +/// argument indices associated with that route. The route indicates the slot, while the indices vector +/// specifies the positions of arguments relevant to this slot. This is used to construct `sorting_order`, +/// which guides the placement of results in the final array. +/// +/// * `args_pattern` - Specifies the argument pattern (e.g., `KeysOnly`, `KeyValuePairs`, ...). +/// The pattern defines how the argument indices are grouped for each slot and determines +/// the ordering of results from `values` as they are placed in the final combined array. +/// +/// # Returns +/// +/// Returns a `RedisResult` containing the final ordered array (`Value::Array`) of combined results. 
+pub(crate) fn combine_and_sort_array_results( values: Vec, - sorting_order: impl ExactSizeIterator>, + route_arg_indices: &[(Route, Vec)], + args_pattern: &MultiSlotArgPattern, ) -> RedisResult { + let result_indices = calculate_multi_slot_result_indices(route_arg_indices, args_pattern)?; let mut results = Vec::new(); results.resize( values.iter().fold(0, |acc, value| match value { @@ -259,9 +374,19 @@ pub(crate) fn combine_and_sort_array_results<'a>( }), Value::Nil, ); - assert_eq!(values.len(), sorting_order.len()); + if values.len() != result_indices.len() { + return Err(RedisError::from(( + ErrorKind::ClientError, + "Mismatch in the number of multi-slot results compared to the expected result count.", + format!( + "Expected: {:?}, Found: {:?}", + values.len(), + result_indices.len() + ), + ))); + } - for (key_indices, value) in sorting_order.into_iter().zip(values) { + for (key_indices, value) in result_indices.into_iter().zip(values) { match value { Value::Array(values) => { assert_eq!(values.len(), key_indices.len()); @@ -287,6 +412,27 @@ fn get_route(is_readonly: bool, key: &[u8]) -> Route { } } +/// Represents the pattern of argument structures in multi-slot commands, +/// defining how the arguments are organized in the command. +#[derive(Debug, Clone, PartialEq)] +pub enum MultiSlotArgPattern { + /// Pattern where only keys are provided in the command. + /// For example: `MGET key1 key2` + KeysOnly, + + /// Pattern where each key is followed by a corresponding value. + /// For example: `MSET key1 value1 key2 value2` + KeyValuePairs, + + /// Pattern where a list of keys is followed by a shared parameter. + /// For example: `JSON.MGET key1 key2 key3 path` + KeysAndLastArg, + + /// Pattern where each key is followed by two associated arguments, forming key-argument-argument triples. + /// For example: `JSON.MSET key1 path1 value1 key2 path2 value2` + KeyWithTwoArgTriples, +} + /// Takes the given `routable` and creates a multi-slot routing info. 
/// This is used for commands like MSET & MGET, where if the command's keys /// are hashed to multiple slots, the command should be split into sub-commands, @@ -297,30 +443,69 @@ fn get_route(is_readonly: bool, key: &[u8]) -> Route { /// /// If all keys are routed to the same slot, there's no need to split the command, /// so a single node routing info will be returned. +/// +/// # Arguments +/// * `routable` - The command or structure containing key-related data that can be routed. +/// * `cmd` - A byte slice representing the command name or opcode (e.g., `b"MGET"`). +/// * `first_key_index` - The starting index in the command where the first key is located. +/// * `args_pattern` - Specifies how keys and values are patterned in the command (e.g., `KeysOnly`, `KeyValuePairs`). +/// +/// # Returns +/// `Some(RoutingInfo)` if routing info is created, indicating the command targets multiple slots or a single slot; +/// `None` if no routing info could be derived. fn multi_shard( routable: &R, cmd: &[u8], first_key_index: usize, - has_values: bool, + args_pattern: MultiSlotArgPattern, ) -> Option where R: Routable + ?Sized, { let is_readonly = is_readonly_cmd(cmd); let mut routes = HashMap::new(); - let mut key_index = 0; - while let Some(key) = routable.arg_idx(first_key_index + key_index) { - let route = get_route(is_readonly, key); - let entry = routes.entry(route); - let keys = entry.or_insert(Vec::new()); - keys.push(key_index); - - if has_values { - key_index += 1; - routable.arg_idx(first_key_index + key_index)?; // check that there's a value for the key - keys.push(key_index); + let mut curr_arg_idx = 0; + let incr_add_next_arg = |arg_indices: &mut Vec, mut curr_arg_idx: usize| { + curr_arg_idx += 1; + // Ensure there's a value following the key + routable.arg_idx(curr_arg_idx)?; + arg_indices.push(curr_arg_idx); + Some(curr_arg_idx) + }; + while let Some(arg) = routable.arg_idx(first_key_index + curr_arg_idx) { + let route = get_route(is_readonly, arg); + let 
arg_indices = routes.entry(route).or_insert(Vec::new()); + + arg_indices.push(curr_arg_idx); + + match args_pattern { + MultiSlotArgPattern::KeysOnly => {} // no additional handling needed for keys-only commands + MultiSlotArgPattern::KeyValuePairs => { + // Increment to the value paired with the current key and add its index + curr_arg_idx = incr_add_next_arg(arg_indices, curr_arg_idx)?; + } + MultiSlotArgPattern::KeysAndLastArg => { + // Check if the command has more keys or if the next argument is a path + if routable + .arg_idx(first_key_index + curr_arg_idx + 2) + .is_none() + { + // Last key reached; add the path argument index for each route and break + let path_idx = curr_arg_idx + 1; + for (_, arg_indices) in routes.iter_mut() { + arg_indices.push(path_idx); + } + break; + } + } + MultiSlotArgPattern::KeyWithTwoArgTriples => { + // Increment to the first argument associated with the current key and add its index + curr_arg_idx = incr_add_next_arg(arg_indices, curr_arg_idx)?; + // Increment to the second argument associated with the current key and add its index + curr_arg_idx = incr_add_next_arg(arg_indices, curr_arg_idx)?; + } } - key_index += 1; + curr_arg_idx += 1; } let mut routes: Vec<(Route, Vec)> = routes.into_iter().collect(); @@ -328,7 +513,7 @@ where RoutingInfo::SingleNode(SingleNodeRoutingInfo::SpecificNode(routes.pop().unwrap().0)) } else { RoutingInfo::MultiNode(( - MultipleNodeRoutingInfo::MultiSlot(routes), + MultipleNodeRoutingInfo::MultiSlot((routes, args_pattern)), ResponsePolicy::for_command(cmd), )) }) @@ -350,15 +535,15 @@ impl ResponsePolicy { b"ACL SETUSER" | b"ACL DELUSER" | b"ACL SAVE" | b"CLIENT SETNAME" | b"CLIENT SETINFO" | b"CONFIG SET" | b"CONFIG RESETSTAT" | b"CONFIG REWRITE" | b"FLUSHALL" | b"FLUSHDB" | b"FUNCTION DELETE" | b"FUNCTION FLUSH" - | b"FUNCTION LOAD" | b"FUNCTION RESTORE" | b"MEMORY PURGE" | b"MSET" | b"PING" - | b"SCRIPT FLUSH" | b"SCRIPT LOAD" | b"SLOWLOG RESET" | b"UNWATCH" | b"WATCH" => { - 
Some(ResponsePolicy::AllSucceeded) - } + | b"FUNCTION LOAD" | b"FUNCTION RESTORE" | b"MEMORY PURGE" | b"MSET" | b"JSON.MSET" + | b"PING" | b"SCRIPT FLUSH" | b"SCRIPT LOAD" | b"SLOWLOG RESET" | b"UNWATCH" + | b"WATCH" => Some(ResponsePolicy::AllSucceeded), b"KEYS" | b"FT._ALIASLIST" | b"FT._LIST" | b"MGET" + | b"JSON.MGET" | b"SLOWLOG GET" | b"PUBSUB CHANNELS" | b"PUBSUB SHARDCHANNELS" => Some(ResponsePolicy::CombineArrays), @@ -390,8 +575,7 @@ enum RouteBy { AllNodes, AllPrimaries, FirstKey, - MultiShardNoValues, - MultiShardWithValues, + MultiShard(MultiSlotArgPattern), Random, SecondArg, SecondArgAfterKeyCount, @@ -454,10 +638,12 @@ fn base_routing(cmd: &[u8]) -> RouteBy { | b"WAITAOF" => RouteBy::AllPrimaries, b"MGET" | b"DEL" | b"EXISTS" | b"UNLINK" | b"TOUCH" | b"WATCH" => { - RouteBy::MultiShardNoValues + RouteBy::MultiShard(MultiSlotArgPattern::KeysOnly) } - b"MSET" => RouteBy::MultiShardWithValues, + b"MSET" => RouteBy::MultiShard(MultiSlotArgPattern::KeyValuePairs), + b"JSON.MGET" => RouteBy::MultiShard(MultiSlotArgPattern::KeysAndLastArg), + b"JSON.MSET" => RouteBy::MultiShard(MultiSlotArgPattern::KeyWithTwoArgTriples), // TODO - special handling - b"SCAN" b"SCAN" | b"SHUTDOWN" | b"SLAVEOF" | b"REPLICAOF" => RouteBy::Undefined, @@ -572,8 +758,7 @@ impl RoutingInfo { | RouteBy::ThirdArgAfterKeyCount | RouteBy::SecondArgSlot | RouteBy::StreamsIndex - | RouteBy::MultiShardNoValues - | RouteBy::MultiShardWithValues => { + | RouteBy::MultiShard(_) => { if matches!(cmd, b"SPUBLISH") { // SPUBLISH does not return MOVED errors within the slot's shard. This means that even if READONLY wasn't sent to a replica, // executing SPUBLISH FOO BAR on that replica will succeed. 
This behavior differs from true key-based commands, @@ -608,9 +793,7 @@ impl RoutingInfo { ResponsePolicy::for_command(cmd), ))), - RouteBy::MultiShardWithValues => multi_shard(r, cmd, 1, true), - - RouteBy::MultiShardNoValues => multi_shard(r, cmd, 1, false), + RouteBy::MultiShard(arg_pattern) => multi_shard(r, cmd, 1, arg_pattern), RouteBy::Random => Some(RoutingInfo::SingleNode(SingleNodeRoutingInfo::Random)), @@ -689,6 +872,52 @@ pub fn is_readonly_cmd(cmd: &[u8]) -> bool { b"BITCOUNT" | b"BITFIELD_RO" | b"BITPOS" + | b"CLIENT ID" + | b"CLIENT CACHING" + | b"CLIENT CAPA" + | b"CLIENT GETNAME" + | b"CLIENT GETREDIR" + | b"CLIENT HELP" + | b"CLIENT INFO" + | b"CLIENT KILL" + | b"CLIENT LIST" + | b"CLIENT NO-EVICT" + | b"CLIENT NO-TOUCH" + | b"CLIENT PAUSE" + | b"CLIENT REPLY" + | b"CLIENT SETINFO" + | b"CLIENT SETNAME" + | b"CLIENT TRACKING" + | b"CLIENT TRACKINGINFO" + | b"CLIENT UNBLOCK" + | b"CLIENT UNPAUSE" + | b"CLUSTER COUNT-FAILURE-REPORTS" + | b"CLUSTER COUNTKEYSINSLOT" + | b"CLUSTER FAILOVER" + | b"CLUSTER GETKEYSINSLOT" + | b"CLUSTER HELP" + | b"CLUSTER INFO" + | b"CLUSTER KEYSLOT" + | b"CLUSTER LINKS" + | b"CLUSTER MYID" + | b"CLUSTER MYSHARDID" + | b"CLUSTER NODES" + | b"CLUSTER REPLICATE" + | b"CLUSTER SAVECONFIG" + | b"CLUSTER SHARDS" + | b"CLUSTER SLOTS" + | b"COMMAND COUNT" + | b"COMMAND DOCS" + | b"COMMAND GETKEYS" + | b"COMMAND GETKEYSANDFLAGS" + | b"COMMAND HELP" + | b"COMMAND INFO" + | b"COMMAND LIST" + | b"CONFIG GET" + | b"CONFIG HELP" + | b"CONFIG RESETSTAT" + | b"CONFIG REWRITE" + | b"CONFIG SET" | b"DBSIZE" | b"DUMP" | b"EVAL_RO" @@ -705,6 +934,7 @@ pub fn is_readonly_cmd(cmd: &[u8]) -> bool { | b"FT._ALIASLIST" | b"FT._LIST" | b"FUNCTION DUMP" + | b"FUNCTION HELP" | b"FUNCTION KILL" | b"FUNCTION LIST" | b"FUNCTION STATS" @@ -717,6 +947,7 @@ pub fn is_readonly_cmd(cmd: &[u8]) -> bool { | b"GET" | b"GETBIT" | b"GETRANGE" + | b"HELLO" | b"HEXISTS" | b"HGET" | b"HGETALL" @@ -737,23 +968,55 @@ pub fn is_readonly_cmd(cmd: &[u8]) -> bool { | 
b"JSON.RESP" | b"JSON.STRLEN" | b"JSON.TYPE" + | b"INFO" | b"KEYS" + | b"LASTSAVE" + | b"LATENCY DOCTOR" + | b"LATENCY GRAPH" + | b"LATENCY HELP" + | b"LATENCY HISTOGRAM" + | b"LATENCY HISTORY" + | b"LATENCY LATEST" + | b"LATENCY RESET" | b"LCS" | b"LINDEX" | b"LLEN" | b"LOLWUT" | b"LPOS" | b"LRANGE" + | b"MEMORY DOCTOR" + | b"MEMORY HELP" + | b"MEMORY MALLOC-STATS" + | b"MEMORY PURGE" + | b"MEMORY STATS" | b"MEMORY USAGE" | b"MGET" + | b"MODULE HELP" + | b"MODULE LIST" + | b"MODULE LOAD" + | b"MODULE LOADEX" + | b"MODULE UNLOAD" | b"OBJECT ENCODING" | b"OBJECT FREQ" + | b"OBJECT HELP" | b"OBJECT IDLETIME" | b"OBJECT REFCOUNT" | b"PEXPIRETIME" | b"PFCOUNT" + | b"PING" | b"PTTL" + | b"PUBLISH" + | b"PUBSUB CHANNELS" + | b"PUBSUB HELP" + | b"PUBSUB NUMPAT" + | b"PUBSUB NUMSUB" + | b"PUBSUB SHARDCHANNELS" + | b"PUBSUB SHARDNUMSUB" | b"RANDOMKEY" + | b"REPLICAOF" + | b"RESET" + | b"ROLE" + | b"SAVE" | b"SCAN" | b"SCARD" | b"SCRIPT DEBUG" @@ -763,22 +1026,35 @@ pub fn is_readonly_cmd(cmd: &[u8]) -> bool { | b"SCRIPT LOAD" | b"SCRIPT SHOW" | b"SDIFF" + | b"SELECT" + | b"SHUTDOWN" | b"SINTER" | b"SINTERCARD" | b"SISMEMBER" | b"SMEMBERS" | b"SMISMEMBER" + | b"SLOWLOG GET" + | b"SLOWLOG HELP" + | b"SLOWLOG LEN" + | b"SLOWLOG RESET" | b"SORT_RO" + | b"SPUBLISH" | b"SRANDMEMBER" | b"SSCAN" + | b"SSUBSCRIBE" | b"STRLEN" + | b"SUBSCRIBE" | b"SUBSTR" | b"SUNION" + | b"SUNSUBSCRIBE" + | b"TIME" | b"TOUCH" | b"TTL" | b"TYPE" + | b"UNSUBSCRIBE" | b"XINFO CONSUMERS" | b"XINFO GROUPS" + | b"XINFO HELP" | b"XINFO STREAM" | b"XLEN" | b"XPENDING" @@ -991,10 +1267,10 @@ fn random_slot() -> u16 { } #[cfg(test)] -mod tests { +mod tests_routing { use super::{ - command_for_multi_slot_indices, AggregateOp, MultipleNodeRoutingInfo, ResponsePolicy, - Route, RoutingInfo, SingleNodeRoutingInfo, SlotAddr, + command_for_multi_slot_indices, AggregateOp, MultiSlotArgPattern, MultipleNodeRoutingInfo, + ResponsePolicy, Route, RoutingInfo, SingleNodeRoutingInfo, SlotAddr, }; use 
crate::{cluster_topology::slot, cmd, parser::parse_redis_value, Value}; use core::panic; @@ -1245,7 +1521,7 @@ mod tests { } #[test] - fn test_multi_shard() { + fn test_multi_shard_keys_only() { let mut cmd = cmd("DEL"); cmd.arg("foo").arg("bar").arg("baz").arg("{bar}vaz"); let routing = RoutingInfo::for_routable(&cmd); @@ -1255,11 +1531,11 @@ mod tests { expected.insert(Route(12182, SlotAddr::Master), vec![0]); assert!( - matches!(routing.clone(), Some(RoutingInfo::MultiNode((MultipleNodeRoutingInfo::MultiSlot(vec), Some(ResponsePolicy::Aggregate(AggregateOp::Sum))))) if { + matches!(routing.clone(), Some(RoutingInfo::MultiNode((MultipleNodeRoutingInfo::MultiSlot((vec, args_pattern)), Some(ResponsePolicy::Aggregate(AggregateOp::Sum))))) if { let routes = vec.clone().into_iter().collect(); - expected == routes + expected == routes && args_pattern == MultiSlotArgPattern::KeysOnly }), - "{routing:?}" + "expected={expected:?}\nrouting={routing:?}" ); let mut cmd = crate::cmd("MGET"); @@ -1271,11 +1547,84 @@ mod tests { expected.insert(Route(12182, SlotAddr::ReplicaOptional), vec![0]); assert!( - matches!(routing.clone(), Some(RoutingInfo::MultiNode((MultipleNodeRoutingInfo::MultiSlot(vec), Some(ResponsePolicy::CombineArrays)))) if { + matches!(routing.clone(), Some(RoutingInfo::MultiNode((MultipleNodeRoutingInfo::MultiSlot((vec, args_pattern)), Some(ResponsePolicy::CombineArrays)))) if { let routes = vec.clone().into_iter().collect(); - expected ==routes + expected == routes && args_pattern == MultiSlotArgPattern::KeysOnly }), - "{routing:?}" + "expected={expected:?}\nrouting={routing:?}" + ); + } + + #[test] + fn test_multi_shard_key_value_pairs() { + let mut cmd = cmd("MSET"); + cmd.arg("foo") // key slot 12182 + .arg("bar") // value + .arg("foo2") // key slot 1044 + .arg("bar2") // value + .arg("{foo}foo3") // key slot 12182 + .arg("bar3"); // value + let routing = RoutingInfo::for_routable(&cmd); + let mut expected = std::collections::HashMap::new(); + 
expected.insert(Route(1044, SlotAddr::Master), vec![2, 3]); + expected.insert(Route(12182, SlotAddr::Master), vec![0, 1, 4, 5]); + + assert!( + matches!(routing.clone(), Some(RoutingInfo::MultiNode((MultipleNodeRoutingInfo::MultiSlot((vec, args_pattern)), Some(ResponsePolicy::AllSucceeded)))) if { + let routes = vec.clone().into_iter().collect(); + expected == routes && args_pattern == MultiSlotArgPattern::KeyValuePairs + }), + "expected={expected:?}\nrouting={routing:?}" + ); + } + + #[test] + fn test_multi_shard_keys_and_path() { + let mut cmd = cmd("JSON.MGET"); + cmd.arg("foo") // key slot 12182 + .arg("bar") // key slot 5061 + .arg("baz") // key slot 4813 + .arg("{bar}vaz") // key slot 5061 + .arg("$.f.a"); // path + let routing = RoutingInfo::for_routable(&cmd); + let mut expected = std::collections::HashMap::new(); + expected.insert(Route(4813, SlotAddr::ReplicaOptional), vec![2, 4]); + expected.insert(Route(5061, SlotAddr::ReplicaOptional), vec![1, 3, 4]); + expected.insert(Route(12182, SlotAddr::ReplicaOptional), vec![0, 4]); + + assert!( + matches!(routing.clone(), Some(RoutingInfo::MultiNode((MultipleNodeRoutingInfo::MultiSlot((vec, args_pattern)), Some(ResponsePolicy::CombineArrays)))) if { + let routes = vec.clone().into_iter().collect(); + expected == routes && args_pattern == MultiSlotArgPattern::KeysAndLastArg + }), + "expected={expected:?}\nrouting={routing:?}" + ); + } + + #[test] + fn test_multi_shard_key_with_two_arg_triples() { + let mut cmd = cmd("JSON.MSET"); + cmd + .arg("foo") // key slot 12182 + .arg("$.a") // path + .arg("bar") // value + .arg("foo2") // key slot 1044 + .arg("$.f.a") // path + .arg("bar2") // value + .arg("{foo}foo3") // key slot 12182 + .arg("$.f.a") // path + .arg("bar3"); // value + let routing = RoutingInfo::for_routable(&cmd); + let mut expected = std::collections::HashMap::new(); + expected.insert(Route(1044, SlotAddr::Master), vec![3, 4, 5]); + expected.insert(Route(12182, SlotAddr::Master), vec![0, 1, 2, 6, 7, 
8]); + + assert!( + matches!(routing.clone(), Some(RoutingInfo::MultiNode((MultipleNodeRoutingInfo::MultiSlot((vec, args_pattern)), Some(ResponsePolicy::AllSucceeded)))) if { + let routes = vec.clone().into_iter().collect(); + expected == routes && args_pattern == MultiSlotArgPattern::KeyWithTwoArgTriples + }), + "expected={expected:?}\nrouting={routing:?}" ); } @@ -1291,9 +1640,10 @@ mod tests { let expected = [vec![0], vec![1, 3], vec![2]]; let mut indices: Vec<_> = match routing { - Some(RoutingInfo::MultiNode((MultipleNodeRoutingInfo::MultiSlot(vec), _))) => { - vec.into_iter().map(|(_, indices)| indices).collect() - } + Some(RoutingInfo::MultiNode(( + MultipleNodeRoutingInfo::MultiSlot((vec, MultiSlotArgPattern::KeysOnly)), + _, + ))) => vec.into_iter().map(|(_, indices)| indices).collect(), _ => panic!("unexpected routing: {routing:?}"), }; indices.sort_by(|prev, next| prev.iter().next().unwrap().cmp(next.iter().next().unwrap())); // sorting because the `for_routable` doesn't return values in a consistent order between runs. 
@@ -1327,7 +1677,8 @@ mod tests { } #[test] - fn test_combining_results_into_single_array() { + fn test_combining_results_into_single_array_only_keys() { + // For example `MGET foo bar baz {baz}baz2 {bar}bar2 {foo}foo2` let res1 = Value::Array(vec![Value::Nil, Value::Okay]); let res2 = Value::Array(vec![ Value::BulkString("1".as_bytes().to_vec()), @@ -1336,18 +1687,95 @@ mod tests { let res3 = Value::Array(vec![Value::SimpleString("2".to_string()), Value::Int(3)]); let results = super::combine_and_sort_array_results( vec![res1, res2, res3], - [vec![0, 5], vec![1, 4], vec![2, 3]].iter(), + &[ + (Route(4813, SlotAddr::Master), vec![2, 3]), + (Route(5061, SlotAddr::Master), vec![1, 4]), + (Route(12182, SlotAddr::Master), vec![0, 5]), + ], + &MultiSlotArgPattern::KeysOnly, ); assert_eq!( results.unwrap(), Value::Array(vec![ - Value::Nil, - Value::BulkString("1".as_bytes().to_vec()), Value::SimpleString("2".to_string()), - Value::Int(3), + Value::BulkString("1".as_bytes().to_vec()), + Value::Nil, + Value::Okay, Value::BulkString("4".as_bytes().to_vec()), + Value::Int(3), + ]) + ); + } + + #[test] + fn test_combining_results_into_single_array_key_value_paires() { + // For example `MSET foo bar foo2 bar2 {foo}foo3 bar3` + let res1 = Value::Array(vec![Value::Okay]); + let res2 = Value::Array(vec![Value::BulkString("1".as_bytes().to_vec()), Value::Nil]); + let results = super::combine_and_sort_array_results( + vec![res1, res2], + &[ + (Route(1044, SlotAddr::Master), vec![2, 3]), + (Route(12182, SlotAddr::Master), vec![0, 1, 4, 5]), + ], + &MultiSlotArgPattern::KeyValuePairs, + ); + + assert_eq!( + results.unwrap(), + Value::Array(vec![ + Value::BulkString("1".as_bytes().to_vec()), + Value::Okay, + Value::Nil + ]) + ); + } + + #[test] + fn test_combining_results_into_single_array_keys_and_path() { + // For example `JSON.MGET foo bar {foo}foo2 $.a` + let res1 = Value::Array(vec![Value::Okay]); + let res2 = Value::Array(vec![Value::BulkString("1".as_bytes().to_vec()), 
Value::Nil]); + let results = super::combine_and_sort_array_results( + vec![res1, res2], + &[ + (Route(5061, SlotAddr::Master), vec![2, 3]), + (Route(12182, SlotAddr::Master), vec![0, 1, 3]), + ], + &MultiSlotArgPattern::KeysAndLastArg, + ); + + assert_eq!( + results.unwrap(), + Value::Array(vec![ + Value::BulkString("1".as_bytes().to_vec()), + Value::Nil, + Value::Okay, + ]) + ); + } + + #[test] + fn test_combining_results_into_single_array_key_with_two_arg_triples() { + // For example `JSON.MSET foo $.a bar foo2 $.f.a bar2 {foo}foo3 $.f bar3` + let res1 = Value::Array(vec![Value::Okay]); + let res2 = Value::Array(vec![Value::BulkString("1".as_bytes().to_vec()), Value::Nil]); + let results = super::combine_and_sort_array_results( + vec![res1, res2], + &[ + (Route(5061, SlotAddr::Master), vec![3, 4, 5]), + (Route(12182, SlotAddr::Master), vec![0, 1, 2, 6, 7, 8]), + ], + &MultiSlotArgPattern::KeyWithTwoArgTriples, + ); + + assert_eq!( + results.unwrap(), + Value::Array(vec![ + Value::BulkString("1".as_bytes().to_vec()), Value::Okay, + Value::Nil ]) ); } diff --git a/glide-core/src/client/value_conversion.rs b/glide-core/src/client/value_conversion.rs index f4706e762a..ca4a0371c9 100644 --- a/glide-core/src/client/value_conversion.rs +++ b/glide-core/src/client/value_conversion.rs @@ -1393,10 +1393,12 @@ pub(crate) fn expected_type_for_cmd(cmd: &Cmd) -> Option { // TODO use enum to avoid mistakes match command.as_slice() { - b"HGETALL" | b"CONFIG GET" | b"FT.CONFIG GET" | b"HELLO" => Some(ExpectedReturnType::Map { - key_type: &None, - value_type: &None, - }), + b"HGETALL" | b"CONFIG GET" | b"FT.CONFIG GET" | b"FT._ALIASLIST" | b"HELLO" => { + Some(ExpectedReturnType::Map { + key_type: &None, + value_type: &None, + }) + } b"XCLAIM" => { if cmd.position(b"JUSTID").is_some() { Some(ExpectedReturnType::ArrayOfStrings) diff --git a/go/api/commands.go b/go/api/commands.go index f48239c524..3c422dd687 100644 --- a/go/api/commands.go +++ b/go/api/commands.go @@ -132,9 
+132,12 @@ type StringCommands interface { // Sets multiple keys to multiple values in a single operation. // // Note: - // When in cluster mode, the command may route to multiple nodes when keys in keyValueMap map to different hash slots. - // - // See [valkey.io] for details. + // In cluster mode, if keys in `keyValueMap` map to different hash slots, the command + // will be split across these slots and executed separately for each. This means the command + // is atomic only at the slot level. If one or more slot-specific requests fail, the entire + // call will return the first encountered error, even though some requests may have succeeded + // while others did not. If this behavior impacts your application logic, consider splitting + // the request into sub-requests per slot to ensure atomicity. // // Parameters: // keyValueMap - A key-value map consisting of keys and their respective values to set. @@ -153,9 +156,12 @@ type StringCommands interface { // Retrieves the values of multiple keys. // // Note: - // When in cluster mode, the command may route to multiple nodes when keys map to different hash slots. - // - // See [valkey.io] for details. + // In cluster mode, if keys in `keys` map to different hash slots, the command + // will be split across these slots and executed separately for each. This means the command + // is atomic only at the slot level. If one or more slot-specific requests fail, the entire + // call will return the first encountered error, even though some requests may have succeeded + // while others did not. If this behavior impacts your application logic, consider splitting + // the request into sub-requests per slot to ensure atomicity. // // Parameters: // keys - A list of keys to retrieve values for. 
diff --git a/java/client/src/main/java/glide/api/commands/GenericBaseCommands.java b/java/client/src/main/java/glide/api/commands/GenericBaseCommands.java index a55c1ef1a8..6234672899 100644 --- a/java/client/src/main/java/glide/api/commands/GenericBaseCommands.java +++ b/java/client/src/main/java/glide/api/commands/GenericBaseCommands.java @@ -23,8 +23,12 @@ public interface GenericBaseCommands { * Removes the specified keys from the database. A key is ignored if it does not * exist. * - * @apiNote When in cluster mode, the command may route to multiple nodes when keys - * map to different hash slots. + * @apiNote In cluster mode, if keys in keys map to different hash slots, the command + * will be split across these slots and executed separately for each. This means the command + * is atomic only at the slot level. If one or more slot-specific requests fail, the entire + * call will return the first encountered error, even though some requests may have succeeded + * while others did not. If this behavior impacts your application logic, consider splitting + * the request into sub-requests per slot to ensure atomicity. * @see valkey.io for details. * @param keys The keys we wanted to remove. * @return The number of keys that were removed. @@ -40,8 +44,12 @@ public interface GenericBaseCommands { * Removes the specified keys from the database. A key is ignored if it does not * exist. * - * @apiNote When in cluster mode, the command may route to multiple nodes when keys - * map to different hash slots. + * @apiNote In cluster mode, if keys in keys map to different hash slots, the command + * will be split across these slots and executed separately for each. This means the command + * is atomic only at the slot level. If one or more slot-specific requests fail, the entire + * call will return the first encountered error, even though some requests may have succeeded + * while others did not. 
If this behavior impacts your application logic, consider splitting + * the request into sub-requests per slot to ensure atomicity. * @see valkey.io for details. * @param keys The keys we wanted to remove. * @return The number of keys that were removed. @@ -56,8 +64,12 @@ public interface GenericBaseCommands { /** * Returns the number of keys in keys that exist in the database. * - * @apiNote When in cluster mode, the command may route to multiple nodes when keys - * map to different hash slots. + * @apiNote In cluster mode, if keys in keys map to different hash slots, the command + * will be split across these slots and executed separately for each. This means the command + * is atomic only at the slot level. If one or more slot-specific requests fail, the entire + * call will return the first encountered error, even though some requests may have succeeded + * while others did not. If this behavior impacts your application logic, consider splitting + * the request into sub-requests per slot to ensure atomicity. * @see valkey.io for details. * @param keys The keys list to check. * @return The number of keys that exist. If the same existing key is mentioned in keys @@ -73,8 +85,12 @@ public interface GenericBaseCommands { /** * Returns the number of keys in keys that exist in the database. * - * @apiNote When in cluster mode, the command may route to multiple nodes when keys - * map to different hash slots. + * @apiNote In cluster mode, if keys in keys map to different hash slots, the command + * will be split across these slots and executed separately for each. This means the command + * is atomic only at the slot level. If one or more slot-specific requests fail, the entire + * call will return the first encountered error, even though some requests may have succeeded + * while others did not. If this behavior impacts your application logic, consider splitting + * the request into sub-requests per slot to ensure atomicity. * @see valkey.io for details. 
* @param keys The keys list to check. * @return The number of keys that exist. If the same existing key is mentioned in keys @@ -93,8 +109,12 @@ public interface GenericBaseCommands { * specified keys and ignores non-existent ones. However, this command does not block the server, * while DEL does. * - * @apiNote When in cluster mode, the command may route to multiple nodes when keys - * map to different hash slots. + * @apiNote In cluster mode, if keys in keys map to different hash slots, the command + * will be split across these slots and executed separately for each. This means the command + * is atomic only at the slot level. If one or more slot-specific requests fail, the entire + * call will return the first encountered error, even though some requests may have succeeded + * while others did not. If this behavior impacts your application logic, consider splitting + * the request into sub-requests per slot to ensure atomicity. * @see valkey.io for details. * @param keys The list of keys to unlink. * @return The number of keys that were unlinked. @@ -112,8 +132,12 @@ public interface GenericBaseCommands { * specified keys and ignores non-existent ones. However, this command does not block the server, * while DEL does. * - * @apiNote When in cluster mode, the command may route to multiple nodes when keys - * map to different hash slots. + * @apiNote In cluster mode, if keys in keys map to different hash slots, the command + * will be split across these slots and executed separately for each. This means the command + * is atomic only at the slot level. If one or more slot-specific requests fail, the entire + * call will return the first encountered error, even though some requests may have succeeded + * while others did not. If this behavior impacts your application logic, consider splitting + * the request into sub-requests per slot to ensure atomicity. * @see valkey.io for details. * @param keys The list of keys to unlink. 
* @return The number of keys that were unlinked. @@ -952,8 +976,12 @@ CompletableFuture pexpireAt( /** * Updates the last access time of specified keys. * - * @apiNote When in cluster mode, the command may route to multiple nodes when keys - * map to different hash slots. + * @apiNote In cluster mode, if keys in keys map to different hash slots, the command + * will be split across these slots and executed separately for each. This means the command + * is atomic only at the slot level. If one or more slot-specific requests fail, the entire + * call will return the first encountered error, even though some requests may have succeeded + * while others did not. If this behavior impacts your application logic, consider splitting + * the request into sub-requests per slot to ensure atomicity. * @see valkey.io for details. * @param keys The keys to update last access time. * @return The number of keys that were updated. @@ -968,8 +996,12 @@ CompletableFuture pexpireAt( /** * Updates the last access time of specified keys. * - * @apiNote When in cluster mode, the command may route to multiple nodes when keys - * map to different hash slots. + * @apiNote In cluster mode, if keys in keys map to different hash slots, the command + * will be split across these slots and executed separately for each. This means the command + * is atomic only at the slot level. If one or more slot-specific requests fail, the entire + * call will return the first encountered error, even though some requests may have succeeded + * while others did not. If this behavior impacts your application logic, consider splitting + * the request into sub-requests per slot to ensure atomicity. * @see valkey.io for details. * @param keys The keys to update last access time. * @return The number of keys that were updated. 
diff --git a/java/client/src/main/java/glide/api/commands/StringBaseCommands.java b/java/client/src/main/java/glide/api/commands/StringBaseCommands.java index 3f46f6a2cb..20f13c30f2 100644 --- a/java/client/src/main/java/glide/api/commands/StringBaseCommands.java +++ b/java/client/src/main/java/glide/api/commands/StringBaseCommands.java @@ -249,8 +249,12 @@ public interface StringBaseCommands { /** * Retrieves the values of multiple keys. * - * @apiNote When in cluster mode, the command may route to multiple nodes when keys - * map to different hash slots. + * @apiNote In cluster mode, if keys in keys map to different hash slots, the command + * will be split across these slots and executed separately for each. This means the command + * is atomic only at the slot level. If one or more slot-specific requests fail, the entire + * call will return the first encountered error, even though some requests may have succeeded + * while others did not. If this behavior impacts your application logic, consider splitting + * the request into sub-requests per slot to ensure atomicity. * @see valkey.io for details. * @param keys A list of keys to retrieve values for. * @return An array of values corresponding to the provided keys.
@@ -267,8 +271,12 @@ public interface StringBaseCommands { /** * Retrieves the values of multiple keys. * - * @apiNote When in cluster mode, the command may route to multiple nodes when keys - * map to different hash slots. + * @apiNote In cluster mode, if keys in keys map to different hash slots, the command + * will be split across these slots and executed separately for each. This means the command + * is atomic only at the slot level. If one or more slot-specific requests fail, the entire + * call will return the first encountered error, even though some requests may have succeeded + * while others did not. If this behavior impacts your application logic, consider splitting + * the request into sub-requests per slot to ensure atomicity. * @see valkey.io for details. * @param keys A list of keys to retrieve values for. * @return An array of values corresponding to the provided keys.
@@ -285,11 +293,15 @@ public interface StringBaseCommands { /** * Sets multiple keys to multiple values in a single operation. * - * @apiNote When in cluster mode, the command may route to multiple nodes when keys in - * keyValueMap map to different hash slots. + * @apiNote In cluster mode, if keys in keyValueMap map to different hash slots, the + * command will be split across these slots and executed separately for each. This means the + * command is atomic only at the slot level. If one or more slot-specific requests fail, the + * entire call will return the first encountered error, even though some requests may have + * succeeded while others did not. If this behavior impacts your application logic, consider + * splitting the request into sub-requests per slot to ensure atomicity. * @see valkey.io for details. * @param keyValueMap A key-value map consisting of keys and their respective values to set. - * @return Always OK. + * @return A simple OK response. * @example *
{@code
      * String result = client.mset(Map.of("key1", "value1", "key2", "value2"}).get();
@@ -301,11 +313,15 @@ public interface StringBaseCommands {
     /**
      * Sets multiple keys to multiple values in a single operation.
      *
-     * @apiNote When in cluster mode, the command may route to multiple nodes when keys in 
-     *     keyValueMap map to different hash slots.
+     * @apiNote In cluster mode, if keys in keyValueMap map to different hash slots, the
+     *     command will be split across these slots and executed separately for each. This means the
+     *     command is atomic only at the slot level. If one or more slot-specific requests fail, the
+     *     entire call will return the first encountered error, even though some requests may have
+     *     succeeded while others did not. If this behavior impacts your application logic, consider
+     *     splitting the request into sub-requests per slot to ensure atomicity.
      * @see valkey.io for details.
      * @param keyValueMap A key-value map consisting of keys and their respective values to set.
-     * @return Always OK.
+     * @return A simple OK response.
      * @example
      *     
{@code
      * String result = client.msetBinary(Map.of(gs("key1"), gs("value1"), gs("key2"), gs("value2")}).get();
diff --git a/java/client/src/main/java/glide/api/commands/TransactionsBaseCommands.java b/java/client/src/main/java/glide/api/commands/TransactionsBaseCommands.java
index 199357fdfe..d4db27785e 100644
--- a/java/client/src/main/java/glide/api/commands/TransactionsBaseCommands.java
+++ b/java/client/src/main/java/glide/api/commands/TransactionsBaseCommands.java
@@ -15,8 +15,12 @@ public interface TransactionsBaseCommands {
      * will only execute commands if the watched keys are not modified before execution of the
      * transaction.
      *
-     * @apiNote When in cluster mode, the command may route to multiple nodes when keys
-     *     map to different hash slots.
+     * @apiNote In cluster mode, if keys in keys map to different hash slots, the command
+     *     will be split across these slots and executed separately for each. This means the command
+     *     is atomic only at the slot level. If one or more slot-specific requests fail, the entire
+     *     call will return the first encountered error, even though some requests may have succeeded
+     *     while others did not. If this behavior impacts your application logic, consider splitting
+     *     the request into sub-requests per slot to ensure atomicity.
      * @see valkey.io for details.
      * @param keys The keys to watch.
      * @return OK.
@@ -41,8 +45,12 @@ public interface TransactionsBaseCommands {
      * will only execute commands if the watched keys are not modified before execution of the
      * transaction.
      *
-     * @apiNote When in cluster mode, the command may route to multiple nodes when keys
-     *     map to different hash slots.
+     * @apiNote In cluster mode, if keys in keys map to different hash slots, the command
+     *     will be split across these slots and executed separately for each. This means the command
+     *     is atomic only at the slot level. If one or more slot-specific requests fail, the entire
+     *     call will return the first encountered error, even though some requests may have succeeded
+     *     while others did not. If this behavior impacts your application logic, consider splitting
+     *     the request into sub-requests per slot to ensure atomicity.
      * @see valkey.io for details.
      * @param keys The keys to watch.
      * @return OK.
diff --git a/java/client/src/main/java/glide/api/commands/servermodules/FT.java b/java/client/src/main/java/glide/api/commands/servermodules/FT.java
index 5f45a9d381..1f27772e1c 100644
--- a/java/client/src/main/java/glide/api/commands/servermodules/FT.java
+++ b/java/client/src/main/java/glide/api/commands/servermodules/FT.java
@@ -18,6 +18,7 @@
 import java.util.Arrays;
 import java.util.Map;
 import java.util.concurrent.CompletableFuture;
+import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import lombok.NonNull;
 
@@ -532,6 +533,7 @@ public static CompletableFuture profile(
     /**
      * Returns information about a given index.
      *
+     * @param client The client to execute the command.
      * @param indexName The index name.
      * @return Nested maps with info about the index. See example for more details.
      * @example
@@ -544,7 +546,7 @@ public static CompletableFuture profile(
      *     "index_status", gs("AVAILABLE"),
      *     "key_type", gs("JSON"),
      *     "creation_timestamp", 1728348101728771L,
-     *     "key_prefixes", new String[] { gs("json:") },
+     *     "key_prefixes", new Object[] { gs("json:") },
      *     "num_indexed_vectors", 0L,
      *     "space_usage", 653471L,
      *     "num_docs", 0L,
@@ -580,17 +582,13 @@ public static CompletableFuture profile(
      */
     public static CompletableFuture> info(
             @NonNull BaseClient client, @NonNull String indexName) {
-        // TODO inconsistency: the outer map is `Map`,
-        //   while inner maps are `Map`
-        //   The outer map converted from `Map` in ClusterValue::ofMultiValueBinary
-        // TODO server returns all strings as `SimpleString`, we're safe to convert all to
-        //   `GlideString`s to `String`
-        return executeCommand(client, new GlideString[] {gs("FT.INFO"), gs(indexName)}, true);
+        return info(client, gs(indexName));
     }
 
     /**
      * Returns information about a given index.
      *
+     * @param client The client to execute the command.
      * @param indexName The index name.
      * @return Nested maps with info about the index. See example for more details.
      * @example
@@ -603,7 +601,7 @@ public static CompletableFuture> info(
      *     "index_status", gs("AVAILABLE"),
      *     "key_type", gs("JSON"),
      *     "creation_timestamp", 1728348101728771L,
-     *     "key_prefixes", new String[] { gs("json:") },
+     *     "key_prefixes", new Object[] { gs("json:") },
      *     "num_indexed_vectors", 0L,
      *     "space_usage", 653471L,
      *     "num_docs", 0L,
@@ -639,7 +637,36 @@ public static CompletableFuture> info(
      */
     public static CompletableFuture> info(
             @NonNull BaseClient client, @NonNull GlideString indexName) {
-        return executeCommand(client, new GlideString[] {gs("FT.INFO"), indexName}, true);
+        // TODO inconsistency on cluster client: the outer map is `Map<String, Object>`,
+        //   while inner maps are `Map<GlideString, Object>`
+        //   The outer map converted from `Map<GlideString, Object>` in ClusterValue::ofMultiValueBinary
+        // TODO server returns all map keys as `SimpleString`, we're safe to convert all
+        //   `GlideString`s to `String`
+
+        // standalone client returns `Map<GlideString, Object>`, but cluster `Map<String, Object>`
+        if (client instanceof GlideClusterClient)
+            return executeCommand(client, new GlideString[] {gs("FT.INFO"), indexName}, true);
+        return FT.<Map<GlideString, Object>>executeCommand(
+                        client, new GlideString[] {gs("FT.INFO"), indexName}, true)
+                .thenApply(
+                        map ->
+                                map.entrySet().stream()
+                                        .collect(Collectors.toMap(e -> e.getKey().toString(), Map.Entry::getValue)));
+    }
+
+    /**
+     * Lists all indexes.
+     *
+     * @param client The client to execute the command.
+     * @return An array of index names.
+     * @example
+     *     
{@code
+     * GlideString[] indices = FT.list(client).get();
+     * }
+ */ + public static CompletableFuture list(@NonNull BaseClient client) { + return FT.executeCommand(client, new GlideString[] {gs("FT._LIST")}, false) + .thenApply(arr -> castArray(arr, GlideString.class)); } /** @@ -720,7 +747,7 @@ public static CompletableFuture aliasdel( * * @param client The client to execute the command. * @param aliasName The alias name. This alias will now be pointed to a different index. - * @param indexName The index name for which an existing alias has to updated. + * @param indexName The index name for which an existing alias has to be updated. * @return "OK". * @example *
{@code
@@ -738,11 +765,11 @@ public static CompletableFuture aliasupdate(
      *
      * @param client The client to execute the command.
      * @param aliasName The alias name. This alias will now be pointed to a different index.
-     * @param indexName The index name for which an existing alias has to updated.
+     * @param indexName The index name for which an existing alias has to be updated.
      * @return "OK".
      * @example
      *     
{@code
-     * FT.aliasupdate(client,gs("myalias"), gs("myindex")).get(); // "OK"
+     * FT.aliasupdate(client, gs("myalias"), gs("myindex")).get(); // "OK"
      * }
*/ public static CompletableFuture aliasupdate( @@ -751,6 +778,35 @@ public static CompletableFuture aliasupdate( return executeCommand(client, args, false); } + /** + * Lists all index aliases. + * + * @param client The client to execute the command. + * @return A map of index aliases to indices being aliased. + * @example + *
{@code
+     * var aliases = FT.aliaslist(client).get();
+     * // the response contains data in the following format:
+     * Map<GlideString, GlideString> aliases = Map.of(
+     *     gs("alias"), gs("myIndex")
+     * );
+     * }
+ */ + public static CompletableFuture> aliaslist( + @NonNull BaseClient client) { + // standalone client returns `Map`, but cluster `Map` + // The map converted from `Map` in ClusterValue::ofMultiValueBinary + // TODO this will fail once an alias name will be non-utf8-compatible + if (client instanceof GlideClient) + return executeCommand(client, new GlideString[] {gs("FT._ALIASLIST")}, true); + return FT.>executeCommand( + client, new GlideString[] {gs("FT._ALIASLIST")}, true) + .thenApply( + map -> + map.entrySet().stream() + .collect(Collectors.toMap(e -> gs(e.getKey()), Map.Entry::getValue))); + } + /** * Parse a query and return information about how that query was parsed. * diff --git a/java/client/src/main/java/glide/api/commands/servermodules/Json.java b/java/client/src/main/java/glide/api/commands/servermodules/Json.java index 02ea0ff07b..a43f30f3af 100644 --- a/java/client/src/main/java/glide/api/commands/servermodules/Json.java +++ b/java/client/src/main/java/glide/api/commands/servermodules/Json.java @@ -419,17 +419,18 @@ public static CompletableFuture get( * @param key The key of the JSON document. * @param path Represents the path within the JSON document where the values * will be appended. - * @param values The values to append to the JSON array at the specified path - * . + * @param values The JSON values to be appended to the array.
+ * JSON string values must be wrapped with quotes. For example, to append "foo", + * pass "\"foo\"". * @return *
    *
  • For JSONPath (path starts with $):
    * Returns a list of integers for every possible path, indicating the new length of the - * new array after appending values, or null for JSON values + * array after appending values, or null for JSON values * matching the path that are not an array. If path does not exist, an * empty array will be returned. *
  • For legacy path (path doesn't start with $):
    - * Returns the length of the new array after appending values to the array + * Returns the new length of the array after appending values to the array * at path. If multiple paths are matched, returns the last updated array. * If the JSON value at path is not a array or if path doesn't * exist, an error is raised. If key doesn't exist, an error is raised. @@ -459,8 +460,9 @@ public static CompletableFuture arrappend( * @param key The key of the JSON document. * @param path Represents the path within the JSON document where the values * will be appended. - * @param values The values to append to the JSON array at the specified path - * . + * @param values The JSON values to be appended to the array.
    + * JSON string values must be wrapped with quotes. For example, to append "foo", + * pass "\"foo\"". * @return *
      *
    • For JSONPath (path starts with $):
      @@ -499,8 +501,8 @@ public static CompletableFuture arrappend( * @param key The key of the JSON document. * @param path The path within the JSON document. * @param index The array index before which values are inserted. - * @param values The JSON values to be inserted into the array, in JSON formatted bytes or str. - * JSON string values must be wrapped with quotes. For example, to append "foo", + * @param values The JSON values to be inserted into the array.
      + * JSON string values must be wrapped with quotes. For example, to insert "foo", * pass "\"foo\"". * @return *
        @@ -552,8 +554,8 @@ public static CompletableFuture arrinsert( * @param key The key of the JSON document. * @param path The path within the JSON document. * @param index The array index before which values are inserted. - * @param values The JSON values to be inserted into the array, in JSON formatted bytes or str. - * JSON string values must be wrapped with quotes. For example, to append "foo", + * @param values The JSON values to be inserted into the array.
        + * JSON string values must be wrapped with quotes. For example, to insert "foo", * pass "\"foo\"". * @return *
          diff --git a/java/client/src/main/java/glide/api/models/BaseTransaction.java b/java/client/src/main/java/glide/api/models/BaseTransaction.java index 062150c3d2..3914b05049 100644 --- a/java/client/src/main/java/glide/api/models/BaseTransaction.java +++ b/java/client/src/main/java/glide/api/models/BaseTransaction.java @@ -583,7 +583,7 @@ public T mget(@NonNull ArgType[] keys) { * * @see valkey.io for details. * @param keyValueMap A key-value map consisting of keys and their respective values to set. - * @return Command Response - Always OK. + * @return Command Response - A simple OK response. */ public T mset(@NonNull Map keyValueMap) { GlideString[] args = flattenMapToGlideStringArray(keyValueMap); diff --git a/java/integTest/src/test/java/glide/PubSubTests.java b/java/integTest/src/test/java/glide/PubSubTests.java index e4eeab6cad..7b4e835b80 100644 --- a/java/integTest/src/test/java/glide/PubSubTests.java +++ b/java/integTest/src/test/java/glide/PubSubTests.java @@ -307,8 +307,8 @@ public void exact_happy_path(boolean standalone, MessageReadMethod method) { @MethodSource("getTestScenarios") public void exact_happy_path_many_channels(boolean standalone, MessageReadMethod method) { skipTestsOnMac(); - int numChannels = 256; - int messagesPerChannel = 256; + int numChannels = 16; + int messagesPerChannel = 16; var messages = new ArrayList(numChannels * messagesPerChannel); ChannelMode mode = exact(standalone); Map> subscriptions = Map.of(mode, new HashSet<>()); @@ -366,8 +366,8 @@ public void sharded_pubsub_many_channels(MessageReadMethod method) { assumeTrue(SERVER_VERSION.isGreaterThanOrEqualTo("7.0.0"), "This feature added in version 7"); skipTestsOnMac(); - int numChannels = 256; - int pubsubMessagesPerChannel = 256; + int numChannels = 16; + int pubsubMessagesPerChannel = 16; var pubsubMessages = new ArrayList(numChannels * pubsubMessagesPerChannel); PubSubClusterChannelMode mode = PubSubClusterChannelMode.SHARDED; Map> subscriptions = Map.of(mode, 
new HashSet<>()); @@ -444,8 +444,8 @@ public void pattern_many_channels(boolean standalone, MessageReadMethod method) skipTestsOnMac(); String prefix = "channel."; GlideString pattern = gs(prefix + "*"); - int numChannels = 256; - int messagesPerChannel = 256; + int numChannels = 16; + int messagesPerChannel = 16; ChannelMode mode = standalone ? PubSubChannelMode.PATTERN : PubSubClusterChannelMode.PATTERN; var messages = new ArrayList(numChannels * messagesPerChannel); var subscriptions = Map.of(mode, Set.of(pattern)); @@ -482,8 +482,8 @@ public void combined_exact_and_pattern_one_client(boolean standalone, MessageRea skipTestsOnMac(); String prefix = "channel."; GlideString pattern = gs(prefix + "*"); - int numChannels = 256; - int messagesPerChannel = 256; + int numChannels = 16; + int messagesPerChannel = 16; var messages = new ArrayList(numChannels * messagesPerChannel); ChannelMode mode = standalone ? PubSubChannelMode.EXACT : PubSubClusterChannelMode.EXACT; Map> subscriptions = @@ -533,7 +533,7 @@ public void combined_exact_and_pattern_multiple_clients( skipTestsOnMac(); String prefix = "channel."; GlideString pattern = gs(prefix + "*"); - int numChannels = 256; + int numChannels = 16; var messages = new ArrayList(numChannels * 2); ChannelMode mode = exact(standalone); Map> subscriptions = Map.of(mode, new HashSet<>()); @@ -604,7 +604,7 @@ public void combined_exact_pattern_and_sharded_one_client(MessageReadMethod meth String prefix = "channel."; GlideString pattern = gs(prefix + "*"); String shardPrefix = "{shard}"; - int numChannels = 256; + int numChannels = 16; var messages = new ArrayList(numChannels * 2); var shardedMessages = new ArrayList(numChannels); Map> subscriptions = @@ -660,7 +660,7 @@ public void coexistense_of_sync_and_async_read() { String prefix = "channel."; String pattern = prefix + "*"; String shardPrefix = "{shard}"; - int numChannels = 256; + int numChannels = 16; var messages = new ArrayList(numChannels * 2); var shardedMessages = new 
ArrayList(numChannels); Map> subscriptions = @@ -742,7 +742,7 @@ public void combined_exact_pattern_and_sharded_multi_client(MessageReadMethod me String prefix = "channel."; GlideString pattern = gs(prefix + "*"); String shardPrefix = "{shard}"; - int numChannels = 256; + int numChannels = 16; var exactMessages = new ArrayList(numChannels); var patternMessages = new ArrayList(numChannels); var shardedMessages = new ArrayList(numChannels); diff --git a/java/integTest/src/test/java/glide/modules/VectorSearchTests.java b/java/integTest/src/test/java/glide/modules/VectorSearchTests.java index 55a1c28ce8..f53f7ced30 100644 --- a/java/integTest/src/test/java/glide/modules/VectorSearchTests.java +++ b/java/integTest/src/test/java/glide/modules/VectorSearchTests.java @@ -319,8 +319,8 @@ public void ft_search() { @SneakyThrows @Test - public void ft_drop() { - var index = UUID.randomUUID().toString(); + public void ft_drop_and_ft_list() { + var index = gs(UUID.randomUUID().toString()); assertEquals( OK, FT.create( @@ -331,17 +331,11 @@ public void ft_drop() { }) .get()); - // TODO use FT.LIST with it is done - var before = - Set.of((Object[]) client.customCommand(new String[] {"FT._LIST"}).get().getSingleValue()); + var before = Set.of(FT.list(client).get()); assertEquals(OK, FT.dropindex(client, index).get()); - // TODO use FT.LIST with it is done - var after = - new HashSet<>( - Set.of( - (Object[]) client.customCommand(new String[] {"FT._LIST"}).get().getSingleValue())); + var after = new HashSet<>(Set.of(FT.list(client).get())); assertFalse(after.contains(index)); after.add(index); @@ -676,7 +670,7 @@ public void ft_info() { @SneakyThrows @Test - public void ft_aliasadd_aliasdel_aliasupdate() { + public void ft_aliasadd_aliasdel_aliasupdate_aliaslist() { var alias1 = "alias1"; var alias2 = "a2"; @@ -693,7 +687,9 @@ public void ft_aliasadd_aliasdel_aliasupdate() { }) .get()); + assertEquals(0, FT.aliaslist(client).get().size()); assertEquals(OK, FT.aliasadd(client, 
alias1, indexName).get()); + assertEquals(Map.of(gs(alias1), gs(indexName)), FT.aliaslist(client).get()); // error with adding the same alias to the same index var exception = @@ -702,6 +698,8 @@ public void ft_aliasadd_aliasdel_aliasupdate() { assertTrue(exception.getMessage().contains("Alias already exists")); assertEquals(OK, FT.aliasupdate(client, alias2, indexName).get()); + assertEquals( + Map.of(gs(alias1), gs(indexName), gs(alias2), gs(indexName)), FT.aliaslist(client).get()); assertEquals(OK, FT.aliasdel(client, alias2).get()); // with GlideString: diff --git a/node/jest.config.js b/node/jest.config.js index 6952aecfca..607c4c0830 100644 --- a/node/jest.config.js +++ b/node/jest.config.js @@ -29,5 +29,5 @@ module.exports = { }, ], ], - setupFilesAfterEnv: ["./tests/setup.js"], + setupFilesAfterEnv: ["./tests/setup.ts"], }; diff --git a/node/npm/glide/index.ts b/node/npm/glide/index.ts index 781fd26594..8e7ef5de14 100644 --- a/node/npm/glide/index.ts +++ b/node/npm/glide/index.ts @@ -128,6 +128,14 @@ function initialize() { FtCreateOptions, FtSearchOptions, FtInfoReturnType, + FtAggregateOptions, + FtAggregateLimit, + FtAggregateFilter, + FtAggregateGroupBy, + FtAggregateReducer, + FtAggregateSortBy, + FtAggregateSortProperty, + FtAggregateApply, FtSearchReturnType, GlideRecord, GlideString, @@ -252,6 +260,14 @@ function initialize() { FtCreateOptions, FtSearchOptions, FtInfoReturnType, + FtAggregateOptions, + FtAggregateLimit, + FtAggregateFilter, + FtAggregateGroupBy, + FtAggregateReducer, + FtAggregateSortBy, + FtAggregateSortProperty, + FtAggregateApply, FtSearchReturnType, GlideRecord, GlideJson, diff --git a/node/package.json b/node/package.json index 685d1338fd..cda3849de7 100644 --- a/node/package.json +++ b/node/package.json @@ -34,7 +34,7 @@ "fix-protobuf-file": "replace 'this\\.encode\\(message, writer\\)\\.ldelim' 'this.encode(message, writer && writer.len ? 
writer.fork() : writer).ldelim' src/ProtobufMessage.js", "test": "npm run build-test-utils && jest --verbose --testPathIgnorePatterns='ServerModules'", "test-minimum": "npm run build-test-utils && jest --verbose --runInBand --testNamePattern='^(.(?!(GlideJson|GlideFt|pubsub|kill)))*$'", - "test-modules": "npm run build-test-utils && jest --verbose --testNamePattern='(GlideJson|GlideFt)'", + "test-modules": "npm run build-test-utils && jest --verbose --runInBand --testNamePattern='(GlideJson|GlideFt)'", "build-test-utils": "cd ../utils && npm i && npm run build", "lint:fix": "npm run install-linting && npx eslint -c ../eslint.config.mjs --fix && npm run prettier:format", "lint": "npm run install-linting && npx eslint -c ../eslint.config.mjs && npm run prettier:check:ci", @@ -59,9 +59,9 @@ "replace": "^1.2.2", "semver": "^7.6.3", "ts-jest": "^29.2.5", + "ts-node": "^10.9.2", "typescript": "^5.5.4", - "uuid": "^10.0.0", - "ts-node": "^10.9.2" + "uuid": "^11.0" }, "author": "Valkey GLIDE Maintainers", "license": "Apache-2.0", diff --git a/node/src/BaseClient.ts b/node/src/BaseClient.ts index 768f995119..36cee35b72 100644 --- a/node/src/BaseClient.ts +++ b/node/src/BaseClient.ts @@ -1396,6 +1396,14 @@ export class BaseClient { * * @see {@link https://valkey.io/commands/del/|valkey.io} for details. * + * @remarks In cluster mode, if keys in `keys` map to different hash slots, + * the command will be split across these slots and executed separately for each. + * This means the command is atomic only at the slot level. If one or more slot-specific + * requests fail, the entire call will return the first encountered error, even + * though some requests may have succeeded while others did not. + * If this behavior impacts your application logic, consider splitting the + * request into sub-requests per slot to ensure atomicity. + * * @param keys - The keys we wanted to remove. * @returns The number of keys that were removed. 
* @@ -1496,7 +1504,14 @@ export class BaseClient { /** Retrieve the values of multiple keys. * * @see {@link https://valkey.io/commands/mget/|valkey.io} for details. - * @remarks When in cluster mode, the command may route to multiple nodes when `keys` map to different hash slots. + * + * @remarks In cluster mode, if keys in `keys` map to different hash slots, + * the command will be split across these slots and executed separately for each. + * This means the command is atomic only at the slot level. If one or more slot-specific + * requests fail, the entire call will return the first encountered error, even + * though some requests may have succeeded while others did not. + * If this behavior impacts your application logic, consider splitting the + * request into sub-requests per slot to ensure atomicity. * * @param keys - A list of keys to retrieve values for. * @param options - (Optional) See {@link DecoderOption}. @@ -1522,10 +1537,18 @@ export class BaseClient { /** Set multiple keys to multiple values in a single operation. * * @see {@link https://valkey.io/commands/mset/|valkey.io} for details. - * @remarks When in cluster mode, the command may route to multiple nodes when keys in `keyValueMap` map to different hash slots. + * + * @remarks In cluster mode, if keys in `keyValueMap` map to different hash slots, + * the command will be split across these slots and executed separately for each. + * This means the command is atomic only at the slot level. If one or more slot-specific + * requests fail, the entire call will return the first encountered error, even + * though some requests may have succeeded while others did not. + * If this behavior impacts your application logic, consider splitting the + * request into sub-requests per slot to ensure atomicity. * * @param keysAndValues - A list of key-value pairs to set. - * @returns always "OK". + * + * @returns A simple "OK" response. 
* * @example * ```typescript @@ -3434,6 +3457,14 @@ export class BaseClient { /** * Returns the number of keys in `keys` that exist in the database. * + * @remarks In cluster mode, if keys in `keys` map to different hash slots, + * the command will be split across these slots and executed separately for each. + * This means the command is atomic only at the slot level. If one or more slot-specific + * requests fail, the entire call will return the first encountered error, even + * though some requests may have succeeded while others did not. + * If this behavior impacts your application logic, consider splitting the + * request into sub-requests per slot to ensure atomicity. + * * @see {@link https://valkey.io/commands/exists/|valkey.io} for details. * * @param keys - The keys list to check. @@ -3456,6 +3487,14 @@ export class BaseClient { * This command, similar to {@link del}, removes specified keys and ignores non-existent ones. * However, this command does not block the server, while {@link https://valkey.io/commands/del|`DEL`} does. * + * @remarks In cluster mode, if keys in `keys` map to different hash slots, + * the command will be split across these slots and executed separately for each. + * This means the command is atomic only at the slot level. If one or more slot-specific + * requests fail, the entire call will return the first encountered error, even + * though some requests may have succeeded while others did not. + * If this behavior impacts your application logic, consider splitting the + * request into sub-requests per slot to ensure atomicity. + * * @see {@link https://valkey.io/commands/unlink/|valkey.io} for details. * * @param keys - The keys we wanted to unlink. @@ -7081,7 +7120,14 @@ export class BaseClient { * Updates the last access time of the specified keys. * * @see {@link https://valkey.io/commands/touch/|valkey.io} for more details. 
- * @remarks When in cluster mode, the command may route to multiple nodes when `keys` map to different hash slots. + * + * @remarks In cluster mode, if keys in `keys` map to different hash slots, + * the command will be split across these slots and executed separately for each. + * This means the command is atomic only at the slot level. If one or more slot-specific + * requests fail, the entire call will return the first encountered error, even + * though some requests may have succeeded while others did not. + * If this behavior impacts your application logic, consider splitting the + * request into sub-requests per slot to ensure atomicity. * * @param keys - The keys to update the last access time of. * @returns The number of keys that were updated. A key is ignored if it doesn't exist. @@ -7104,7 +7150,14 @@ export class BaseClient { * transaction. Executing a transaction will automatically flush all previously watched keys. * * @see {@link https://valkey.io/commands/watch/|valkey.io} and {@link https://valkey.io/topics/transactions/#cas|Valkey Glide Wiki} for more details. - * @remarks When in cluster mode, the command may route to multiple nodes when `keys` map to different hash slots. + * + * @remarks In cluster mode, if keys in `keys` map to different hash slots, + * the command will be split across these slots and executed separately for each. + * This means the command is atomic only at the slot level. If one or more slot-specific + * requests fail, the entire call will return the first encountered error, even + * though some requests may have succeeded while others did not. + * If this behavior impacts your application logic, consider splitting the + * request into sub-requests per slot to ensure atomicity. * * @param keys - The keys to watch. * @returns A simple `"OK"` response. 
diff --git a/node/src/server-modules/GlideFt.ts b/node/src/server-modules/GlideFt.ts index 60c7f72459..a5beba429b 100644 --- a/node/src/server-modules/GlideFt.ts +++ b/node/src/server-modules/GlideFt.ts @@ -12,7 +12,12 @@ import { } from "../BaseClient"; import { GlideClient } from "../GlideClient"; import { GlideClusterClient } from "../GlideClusterClient"; -import { Field, FtCreateOptions, FtSearchOptions } from "./GlideFtOptions"; +import { + Field, + FtAggregateOptions, + FtCreateOptions, + FtSearchOptions, +} from "./GlideFtOptions"; /** Response type of {@link GlideFt.info | ft.info} command. */ export type FtInfoReturnType = Record< @@ -40,7 +45,6 @@ export class GlideFt { * @param indexName - The index name for the index to be created. * @param schema - The fields of the index schema, specifying the fields and their types. * @param options - (Optional) Options for the `FT.CREATE` command. See {@link FtCreateOptions}. - * * @returns If the index is successfully created, returns "OK". * * @example @@ -192,7 +196,6 @@ export class GlideFt { * * @param client - The client to execute the command. * @param indexName - The index name. - * * @returns "OK" * * @example @@ -212,13 +215,187 @@ export class GlideFt { }) as Promise<"OK">; } + /** + * Lists all indexes. + * + * @param client - The client to execute the command. + * @param options - (Optional) See {@link DecoderOption}. + * @returns An array of index names. + * + * @example + * ```typescript + * console.log(await GlideFt.list(client)); // Output: ["index1", "index2"] + * ``` + */ + static async list( + client: GlideClient | GlideClusterClient, + options?: DecoderOption, + ): Promise { + return _handleCustomCommand(client, ["FT._LIST"], options) as Promise< + GlideString[] + >; + } + + /** + * Runs a search query on an index, and perform aggregate transformations on the results. + * + * @param client - The client to execute the command. + * @param indexName - The index name. 
+ * @param query - The text query to search. + * @param options - Additional parameters for the command - see {@link FtAggregateOptions} and {@link DecoderOption}. + * @returns Results of the last stage of the pipeline. + * + * @example + * ```typescript + * const options: FtAggregateOptions = { + * loadFields: ["__key"], + * clauses: [ + * { + * type: "GROUPBY", + * properties: ["@condition"], + * reducers: [ + * { + * function: "TOLIST", + * args: ["__key"], + * name: "bicycles", + * }, + * ], + * }, + * ], + * }; + * const result = await GlideFt.aggregate("myIndex", "*", options); + * console.log(result); // Output: + * // [ + * // [ + * // { + * // key: "condition", + * // value: "refurbished" + * // }, + * // { + * // key: "bicycles", + * // value: [ "bicycle:9" ] + * // } + * // ], + * // [ + * // { + * // key: "condition", + * // value: "used" + * // }, + * // { + * // key: "bicycles", + * // value: [ "bicycle:1", "bicycle:2", "bicycle:3" ] + * // } + * // ], + * // [ + * // { + * // key: "condition", + * // value: "new" + * // }, + * // { + * // key: "bicycles", + * // value: [ "bicycle:0", "bicycle:5" ] + * // } + * // ] + * // ] + * ``` + */ + static async aggregate( + client: GlideClient | GlideClusterClient, + indexName: GlideString, + query: GlideString, + options?: DecoderOption & FtAggregateOptions, + ): Promise[]> { + const args: GlideString[] = ["FT.AGGREGATE", indexName, query]; + + if (options) { + if (options.loadAll) args.push("LOAD", "*"); + else if (options.loadFields) + args.push( + "LOAD", + options.loadFields.length.toString(), + ...options.loadFields, + ); + + if (options.timeout) + args.push("TIMEOUT", options.timeout.toString()); + + if (options.params) { + args.push( + "PARAMS", + (options.params.length * 2).toString(), + ...options.params.flatMap((pair) => pair), + ); + } + + if (options.clauses) { + for (const clause of options.clauses) { + switch (clause.type) { + case "LIMIT": + args.push( + clause.type, + clause.offset.toString(), 
+ clause.count.toString(), + ); + break; + case "FILTER": + args.push(clause.type, clause.expression); + break; + case "GROUPBY": + args.push( + clause.type, + clause.properties.length.toString(), + ...clause.properties, + ); + + for (const reducer of clause.reducers) { + args.push( + "REDUCE", + reducer.function, + reducer.args.length.toString(), + ...reducer.args, + ); + if (reducer.name) args.push("AS", reducer.name); + } + + break; + case "SORTBY": + args.push( + clause.type, + (clause.properties.length * 2).toString(), + ); + for (const property of clause.properties) + args.push(property.property, property.order); + if (clause.max) + args.push("MAX", clause.max.toString()); + break; + case "APPLY": + args.push( + clause.type, + clause.expression, + "AS", + clause.name, + ); + break; + default: + throw new Error( + "Unknown clause type in FtAggregateOptions", + ); + } + } + } + } + + return _handleCustomCommand(client, args, options) as Promise< + GlideRecord[] + >; + } + /** * Returns information about a given index. * * @param client - The client to execute the command. * @param indexName - The index name. * @param options - (Optional) See {@link DecoderOption}. - * * @returns Nested maps with info about the index. See example for more details. * * @example @@ -278,6 +455,62 @@ export class GlideFt { ).then(convertGlideRecordToRecord); } + /** + * Parse a query and return information about how that query was parsed. + * + * @param client - The client to execute the command. + * @param indexName - The index name. + * @param query - The text query to search. It is the same as the query passed as + * an argument to {@link search | FT.SEARCH} or {@link aggregate | FT.AGGREGATE}. + * @param options - (Optional) See {@link DecoderOption}. + * @returns A query execution plan. 
+ * + * @example + * ```typescript + * const result = GlideFt.explain(client, "myIndex", "@price:[0 10]"); + * console.log(result); // Output: "Field {\n\tprice\n\t0\n\t10\n}" + * ``` + */ + static explain( + client: GlideClient | GlideClusterClient, + indexName: GlideString, + query: GlideString, + options?: DecoderOption, + ): Promise { + const args = ["FT.EXPLAIN", indexName, query]; + + return _handleCustomCommand(client, args, options); + } + + /** + * Parse a query and return information about how that query was parsed. + * Same as {@link explain | FT.EXPLAIN}, except that the results are + * displayed in a different format. + * + * @param client - The client to execute the command. + * @param indexName - The index name. + * @param query - The text query to search. It is the same as the query passed as + * an argument to {@link search | FT.SEARCH} or {@link aggregate | FT.AGGREGATE}. + * @param options - (Optional) See {@link DecoderOption}. + * @returns A query execution plan. + * + * @example + * ```typescript + * const result = GlideFt.explaincli(client, "myIndex", "@price:[0 10]"); + * console.log(result); // Output: ["Field {", "price", "0", "10", "}"] + * ``` + */ + static explaincli( + client: GlideClient | GlideClusterClient, + indexName: GlideString, + query: GlideString, + options?: DecoderOption, + ): Promise { + const args = ["FT.EXPLAINCLI", indexName, query]; + + return _handleCustomCommand(client, args, options); + } + /** * Uses the provided query expression to locate keys within an index. Once located, the count * and/or content of indexed fields within those keys can be returned. @@ -286,7 +519,6 @@ export class GlideFt { * @param indexName - The index name to search into. * @param query - The text query to search. * @param options - (Optional) See {@link FtSearchOptions} and {@link DecoderOption}. 
- * * @returns A two-element array, where the first element is the number of documents in the result set, and the * second element has the format: `GlideRecord>`: * a mapping between document names and a map of their attributes. @@ -393,17 +625,96 @@ export class GlideFt { [number, GlideRecord>] >; } + + /** + * Adds an alias for an index. The new alias name can be used anywhere that an index name is required. + * + * @param client - The client to execute the command. + * @param indexName - The alias to be added to the index. + * @param alias - The index name for which the alias has to be added. + * @returns `"OK"` + * + * @example + * ```typescript + * // Example usage of FT.ALIASADD to add an alias for an index. + * await GlideFt.aliasadd(client, "index", "alias"); // "OK" + * ``` + */ + static async aliasadd( + client: GlideClient | GlideClusterClient, + indexName: GlideString, + alias: GlideString, + ): Promise<"OK"> { + const args: GlideString[] = ["FT.ALIASADD", alias, indexName]; + return _handleCustomCommand(client, args, { + decoder: Decoder.String, + }) as Promise<"OK">; + } + + /** + * Deletes an existing alias for an index. + * + * @param client - The client to execute the command. + * @param alias - The existing alias to be deleted for an index. + * @returns `"OK"` + * + * @example + * ```typescript + * // Example usage of FT.ALIASDEL to delete an existing alias. + * await GlideFt.aliasdel(client, "alias"); // "OK" + * ``` + */ + static async aliasdel( + client: GlideClient | GlideClusterClient, + alias: GlideString, + ): Promise<"OK"> { + const args: GlideString[] = ["FT.ALIASDEL", alias]; + return _handleCustomCommand(client, args, { + decoder: Decoder.String, + }) as Promise<"OK">; + } + + /** + * Updates an existing alias to point to a different physical index. This command only affects future references to the alias. + * + * @param client - The client to execute the command. + * @param alias - The alias name. 
This alias will now be pointed to a different index. + * @param indexName - The index name for which an existing alias has to updated. + * @returns `"OK"` + * + * @example + * ```typescript + * // Example usage of FT.ALIASUPDATE to update an alias to point to a different index. + * await GlideFt.aliasupdate(client, "newAlias", "index"); // "OK" + * ``` + */ + static async aliasupdate( + client: GlideClient | GlideClusterClient, + alias: GlideString, + indexName: GlideString, + ): Promise<"OK"> { + const args: GlideString[] = ["FT.ALIASUPDATE", alias, indexName]; + return _handleCustomCommand(client, args, { + decoder: Decoder.String, + }) as Promise<"OK">; + } } /** * @internal */ -async function _handleCustomCommand( +async function _handleCustomCommand( client: GlideClient | GlideClusterClient, args: GlideString[], decoderOption: DecoderOption = {}, -): Promise { +): Promise { return client instanceof GlideClient - ? (client as GlideClient).customCommand(args, decoderOption) - : (client as GlideClusterClient).customCommand(args, decoderOption); + ? ((client as GlideClient).customCommand( + args, + decoderOption, + ) as Promise) + : ((client as GlideClusterClient).customCommand( + args, + decoderOption, + ) as Promise); } diff --git a/node/src/server-modules/GlideFtOptions.ts b/node/src/server-modules/GlideFtOptions.ts index fffccd11c1..e6fe836154 100644 --- a/node/src/server-modules/GlideFtOptions.ts +++ b/node/src/server-modules/GlideFtOptions.ts @@ -2,7 +2,9 @@ * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ +import { SortOrder } from "src/Commands"; import { GlideRecord, GlideString } from "../BaseClient"; +import { GlideFt } from "./GlideFt"; // eslint-disable-line @typescript-eslint/no-unused-vars interface BaseField { /** The name of the field. 
*/ @@ -109,7 +111,7 @@ export type VectorFieldAttributesHnsw = VectorFieldAttributes & { export type Field = TextField | TagField | NumericField | VectorField; /** - * Represents the input options to be used in the FT.CREATE command. + * Represents the input options to be used in the {@link GlideFt.create | FT.CREATE} command. * All fields in this class are optional inputs for FT.CREATE. */ export interface FtCreateOptions { @@ -119,6 +121,109 @@ export interface FtCreateOptions { prefixes?: GlideString[]; } +/** Additional parameters for {@link GlideFt.aggregate | FT.AGGREGATE} command. */ +export type FtAggregateOptions = { + /** Query timeout in milliseconds. */ + timeout?: number; + /** + * {@link FtAggregateFilter | FILTER}, {@link FtAggregateLimit | LIMIT}, {@link FtAggregateGroupBy | GROUPBY}, + * {@link FtAggregateSortBy | SORTBY} and {@link FtAggregateApply | APPLY} clauses, that can be repeated + * multiple times in any order and be freely intermixed. They are applied in the order specified, + * with the output of one clause feeding the input of the next clause. + */ + clauses?: ( + | FtAggregateLimit + | FtAggregateFilter + | FtAggregateGroupBy + | FtAggregateSortBy + | FtAggregateApply + )[]; + /** The key/value pairs can be referenced from within the query expression. */ + params?: [GlideString, GlideString][]; +} & ( + | { + /** List of fields to load from the index. */ + loadFields?: GlideString[]; + /** `loadAll` and `loadFields` are mutually exclusive. */ + loadAll?: never; + } + | { + /** Option to load all fields declared in the index */ + loadAll?: boolean; + /** `loadAll` and `loadFields` are mutually exclusive. */ + loadFields?: never; + } +); + +/** A clause for limiting the number of retained records. */ +export interface FtAggregateLimit { + type: "LIMIT"; + /** Starting point from which the records have to be retained. */ + offset: number; + /** The total number of records to be retained. 
*/ + count: number; +} + +/** + * A clause for filtering the results using predicate expression relating to values in each result. + * It is applied post query and relate to the current state of the pipeline. + */ +export interface FtAggregateFilter { + type: "FILTER"; + /** The expression to filter the results. */ + expression: GlideString; +} + +/** A clause for grouping the results in the pipeline based on one or more properties. */ +export interface FtAggregateGroupBy { + type: "GROUPBY"; + /** The list of properties to be used for grouping the results in the pipeline. */ + properties: GlideString[]; + /** The list of functions that handles the group entries by performing multiple aggregate operations. */ + reducers: FtAggregateReducer[]; +} + +/** + * A clause for reducing the matching results in each group using a reduction function. + * The matching results are reduced into a single record. + */ +export interface FtAggregateReducer { + /** The reduction function name for the respective group. */ + function: string; + /** The list of arguments for the reducer. */ + args: GlideString[]; + /** User defined property name for the reducer. */ + name?: GlideString; +} + +/** A clause for sorting the pipeline up until the point of SORTBY, using a list of properties. */ +export interface FtAggregateSortBy { + type: "SORTBY"; + /** A list of sorting parameters for the sort operation. */ + properties: FtAggregateSortProperty[]; + /** The MAX value for optimizing the sorting, by sorting only for the n-largest elements. */ + max?: number; +} + +/** A single property for the {@link FtAggregateSortBy | SORTBY} clause. */ +export interface FtAggregateSortProperty { + /** The sorting parameter. */ + property: GlideString; + /** The order for the sorting. */ + order: SortOrder; +} + +/** + * A clause for applying a 1-to-1 transformation on one or more properties and stores the result + * as a new property down the pipeline or replaces any property using this transformation. 
+ */ +export interface FtAggregateApply { + type: "APPLY"; + /** The transformation expression. */ + expression: GlideString; + /** The new property name to store the result of apply. This name can be referenced by further operations down the pipeline. */ + name: GlideString; +} /** * Represents the input options to be used in the FT.SEARCH command. * All fields in this class are optional inputs for FT.SEARCH. diff --git a/node/src/server-modules/GlideJson.ts b/node/src/server-modules/GlideJson.ts index 2db3e453ae..9b30fb9aeb 100644 --- a/node/src/server-modules/GlideJson.ts +++ b/node/src/server-modules/GlideJson.ts @@ -203,8 +203,8 @@ export class GlideJson { * @param key - The key of the JSON document. * @param path - The path within the JSON document. * @param index - The array index before which values are inserted. - * @param values - The JSON values to be inserted into the array, in JSON formatted bytes or str. - * JSON string values must be wrapped with quotes. For example, to append `"foo"`, pass `"\"foo\""`. + * @param values - The JSON values to be inserted into the array. + * JSON string values must be wrapped with quotes. For example, to insert `"foo"`, pass `"\"foo\""`. * @returns * - For JSONPath (path starts with `$`): * Returns an array with a list of integers for every possible path, @@ -404,6 +404,59 @@ export class GlideJson { return _executeCommand>(client, args); } + /** + * Searches for the first occurrence of a `scalar` JSON value in the arrays at the `path`. + * Out of range errors are treated by rounding the index to the array's `start` and `end. + * If `start` > `end`, return `-1` (not found). + * + * @param client - The client to execute the command. + * @param key - The key of the JSON document. + * @param path - The path within the JSON document. + * @param scalar - The scalar value to search for. + * @param options - (Optional) Additional parameters: + * - (Optional) `start`: The start index, inclusive. Default to 0 if not provided. 
+ * - (Optional) `end`: The end index, exclusive. Default to 0 if not provided. + * 0 or -1 means the last element is included. + * @returns + * - For JSONPath (path starts with `$`): + * Returns an array with a list of integers for every possible path, + * indicating the index of the matching element. The value is `-1` if not found. + * If a value is not an array, its corresponding return value is `null`. + * - For legacy path (path doesn't start with `$`): + * Returns an integer representing the index of matching element, or `-1` if + * not found. If the value at the `path` is not an array, an error is raised. + * + * @example + * ```typescript + * await GlideJson.set(client, "doc", "$", '{"a": ["value", 3], "b": {"a": [3, ["value", false], 5]}}'); + * console.log(await GlideJson.arrindex(client, "doc", "$..a", 3, { start: 3, end: 3 }); // Output: [2, -1] + * ``` + */ + static async arrindex( + client: BaseClient, + key: GlideString, + path: GlideString, + scalar: GlideString | number | boolean | null, + options?: { start: number; end?: number }, + ): Promise> { + const args = ["JSON.ARRINDEX", key, path]; + + if (typeof scalar === `number`) { + args.push(scalar.toString()); + } else if (typeof scalar === `boolean`) { + args.push(scalar ? `true` : `false`); + } else if (scalar !== null) { + args.push(scalar); + } else { + args.push(`null`); + } + + if (options?.start !== undefined) args.push(options?.start.toString()); + if (options?.end !== undefined) args.push(options?.end.toString()); + + return _executeCommand(client, args); + } + /** * Toggles a Boolean value stored at the specified `path` within the JSON document stored at `key`. 
* @@ -423,23 +476,23 @@ export class GlideJson { * const resultSet = await GlideJson.set("doc", "$", jsonStr); * // Output: 'OK' * - * const resultToggle = await.GlideJson.toggle(client, "doc", "$.bool") + * const resultToggle = await.GlideJson.toggle(client, "doc", {path: "$.bool"}); * // Output: [false, true, null] - Indicates successful toggling of the Boolean values at path '$.bool' in the key stored at `doc`. * - * const resultToggle = await.GlideJson.toggle(client, "doc", "bool") + * const resultToggle = await.GlideJson.toggle(client, "doc", {path: "bool"}); * // Output: true - Indicates successful toggling of the Boolean value at path 'bool' in the key stored at `doc`. * - * const resultToggle = await.GlideJson.toggle(client, "doc", "bool") + * const resultToggle = await.GlideJson.toggle(client, "doc", {path: "bool"}); * // Output: true - Indicates successful toggling of the Boolean value at path 'bool' in the key stored at `doc`. * - * const jsonGetStr = await GlideJson.get(client, "doc", "$"); + * const jsonGetStr = await GlideJson.get(client, "doc", {path: "$"}); * console.log(JSON.stringify(jsonGetStr)); * // Output: [{bool: true, nested: {bool: true, nested: {bool: 10}}}] - The updated JSON value in the key stored at `doc`. * * // Without specifying a path, the path defaults to root. * console.log(await GlideJson.set(client, "doc2", ".", true)); // Output: "OK" - * console.log(await GlideJson.toggle(client, "doc2")); // Output: "false" - * console.log(await GlideJson.toggle(client, "doc2")); // Output: "true" + * console.log(await GlideJson.toggle(client, {path: "doc2"})); // Output: "false" + * console.log(await GlideJson.toggle(client, {path: "doc2"})); // Output: "true" * ``` */ static async toggle( @@ -505,9 +558,9 @@ export class GlideJson { * ```typescript * console.log(await GlideJson.set(client, "doc", "$", '{a: 1, nested: {a:2, b:3}}')); * // Output: "OK" - Indicates successful setting of the value at path '$' in the key stored at `doc`. 
- * console.log(await GlideJson.forget(client, "doc", "$..a")); + * console.log(await GlideJson.forget(client, "doc", {path: "$..a"})); * // Output: 2 - Indicates successful deletion of the specific values in the key stored at `doc`. - * console.log(await GlideJson.get(client, "doc", "$")); + * console.log(await GlideJson.get(client, "doc", {path: "$"})); * // Output: "[{nested: {b: 3}}]" - Returns the value at path '$' in the JSON document stored at `doc`. * console.log(await GlideJson.forget(client, "doc")); * // Output: 1 - Deletes the entire JSON document stored at `doc`. @@ -549,7 +602,7 @@ export class GlideJson { * ```typescript * console.log(await GlideJson.set(client, "doc", "$", '[1, 2.3, "foo", true, null, {}, []]')); * // Output: 'OK' - Indicates successful setting of the value at path '$' in the key stored at `doc`. - * const result = await GlideJson.type(client, "doc", "$[*]"); + * const result = await GlideJson.type(client, "doc", {path: "$[*]"}); * console.log(result); * // Output: ["integer", "number", "string", "boolean", null, "object", "array"]; * console.log(await GlideJson.set(client, "doc2", ".", "{Name: 'John', Age: 27}")); @@ -571,6 +624,59 @@ export class GlideJson { return _executeCommand>(client, args); } + /** + * Clears arrays or objects at the specified JSON path in the document stored at `key`. + * Numeric values are set to `0`, boolean values are set to `false`, and string values are converted to empty strings. + * + * @param client - The client to execute the command. + * @param key - The key of the JSON document. + * @param options - (Optional) Additional parameters: + * - (Optional) `path`: The JSON path to the arrays or objects to be cleared. Defaults to root if not provided. + * @returns The number of containers cleared, numeric values zeroed, and booleans toggled to `false`, + * and string values converted to empty strings. 
+     * If `path` doesn't exist, or the value at `path` is already empty (e.g., an empty array, object, or string), `0` is returned.
+     * If `key` doesn't exist, an error is raised.
+     *
+     * @example
+     * ```typescript
+     * console.log(await GlideJson.set(client, "doc", "$", '{"obj":{"a":1, "b":2}, "arr":[1,2,3], "str": "foo", "bool": true, "int": 42, "float": 3.14, "nullVal": null}'));
+     * // Output: 'OK' - JSON document is successfully set.
+     * console.log(await GlideJson.clear(client, "doc", {path: "$.*"}));
+     * // Output: 6 - 6 values are cleared (arrays/objects/strings/numbers/booleans), but `null` remains as is.
+     * console.log(await GlideJson.get(client, "doc", {path: "$"}));
+     * // Output: '[{"obj":{},"arr":[],"str":"","bool":false,"int":0,"float":0.0,"nullVal":null}]'
+     * console.log(await GlideJson.clear(client, "doc", {path: "$.*"}));
+     * // Output: 0 - No further clearing needed since the containers are already empty and the values are defaults.
+     *
+     * console.log(await GlideJson.set(client, "doc", "$", '{"a": 1, "b": {"a": [5, 6, 7], "b": {"a": true}}, "c": {"a": "value", "b": {"a": 3.5}}, "d": {"a": {"foo": "foo"}}, "nullVal": null}'));
+     * // Output: 'OK'
+     * console.log(await GlideJson.clear(client, "doc", {path: "b.a[1:3]"}));
+     * // Output: 2 - 2 elements (`6` and `7`) are cleared.
+     * console.log(await GlideJson.clear(client, "doc", {path: "b.a[1:3]"}));
+     * // Output: 0 - No elements cleared since specified slice has already been cleared.
+     * console.log(await GlideJson.get(client, "doc", {path: "$..a"}));
+     * // Output: '[1,[5,0,0],true,"value",3.5,{"foo":"foo"}]'
+     *
+     * console.log(await GlideJson.clear(client, "doc", {path: "$..a"}));
+     * // Output: 6 - All numeric, boolean, and string values across paths are cleared.
+ * console.log(await GlideJson.get(client, "doc", {path: "$..a"})); + * // Output: '[0,[],false,"",0.0,{}]' + * ``` + */ + static async clear( + client: BaseClient, + key: GlideString, + options?: { path: GlideString }, + ): Promise> { + const args = ["JSON.CLEAR", key]; + + if (options) { + args.push(options.path); + } + + return _executeCommand>(client, args); + } + /** * Retrieve the JSON value at the specified `path` within the JSON document stored at `key`. * The returning result is in the Valkey or Redis OSS Serialization Protocol (RESP). @@ -724,4 +830,289 @@ export class GlideJson { return _executeCommand>(client, args); } + + /** + * Appends one or more `values` to the JSON array at the specified `path` within the JSON + * document stored at `key`. + * + * @param client - The client to execute the command. + * @param key - The key of the JSON document. + * @param path - The path within the JSON document. + * @param values - The JSON values to be appended to the array. + * JSON string values must be wrapped with quotes. For example, to append `"foo"`, pass `"\"foo\""`. + * @returns + * - For JSONPath (path starts with `$`): + * Returns an array with a list of integers for every possible path, + * indicating the new length of the array, or `null` for JSON values matching + * the path that are not an array. If `path` does not exist, an empty array + * will be returned. + * - For legacy path (path doesn't start with `$`): + * Returns an integer representing the new length of the array. If multiple paths are + * matched, returns the length of the first modified array. If `path` doesn't + * exist or the value at `path` is not an array, an error is raised. + * - If the index is out of bounds or `key` doesn't exist, an error is raised. 
+ * + * @example + * ```typescript + * await GlideJson.set(client, "doc", "$", '{"a": 1, "b": ["one", "two"]}'); + * const result = await GlideJson.arrappend(client, "doc", "$.b", ["three"]); + * console.log(result); // Output: [3] - the new length of the array at path '$.b' after appending the value. + * const result = await GlideJson.arrappend(client, "doc", ".b", ["four"]); + * console.log(result); // Output: 4 - the new length of the array at path '.b' after appending the value. + * const doc = await json.get(client, "doc"); + * console.log(doc); // Output: '{"a": 1, "b": ["one", "two", "three", "four"]}' + * ``` + */ + static async arrappend( + client: BaseClient, + key: GlideString, + path: GlideString, + values: GlideString[], + ): Promise> { + const args = ["JSON.ARRAPPEND", key, path, ...values]; + return _executeCommand(client, args); + } + + /** + * Reports memory usage in bytes of a JSON object at the specified `path` within the JSON document stored at `key`. + * + * @param client - The client to execute the command. + * @param key - The key of the JSON document. + * @param value - The value to append to the string. Must be wrapped with single quotes. For example, to append "foo", pass '"foo"'. + * @param options - (Optional) Additional parameters: + * - (Optional) `path`: The path within the JSON document, returns total memory usage if no path is given. + * @returns + * - For JSONPath (path starts with `$`): + * - Returns an array of numbers for every possible path, indicating the memory usage. + * If `path` does not exist, an empty array will be returned. + * - For legacy path (path doesn't start with `$`): + * - Returns an integer representing the memory usage. If multiple paths are matched, + * returns the data of the first matching object. If `path` doesn't exist, an error is raised. + * - If `key` doesn't exist, returns `null`. 
+ * + * @example + * ```typescript + * console.log(await GlideJson.set(client, "doc", "$", '[1, 2.3, "foo", true, null, {}, [], {a:1, b:2}, [1, 2, 3]]')); + * // Output: 'OK' - Indicates successful setting of the value at path '$' in the key stored at `doc`. + * console.log(await GlideJson.debugMemory(client, "doc", {path: ".."}); + * // Output: 258 + * ``` + */ + static async debugMemory( + client: BaseClient, + key: GlideString, + options?: { path: GlideString }, + ): Promise> { + const args = ["JSON.DEBUG", "MEMORY", key]; + + if (options) { + args.push(options.path); + } + + return _executeCommand(client, args); + } + + /** + * Reports the number of fields at the specified `path` within the JSON document stored at `key`. + * + * @param client - The client to execute the command. + * @param key - The key of the JSON document. + * @param value - The value to append to the string. Must be wrapped with single quotes. For example, to append "foo", pass '"foo"'. + * @param options - (Optional) Additional parameters: + * - (Optional) `path`: The path within the JSON document, returns total number of fields if no path is given. + * @returns + * - For JSONPath (path starts with `$`): + * - Returns an array of numbers for every possible path, indicating the number of fields. + * If `path` does not exist, an empty array will be returned. + * - For legacy path (path doesn't start with `$`): + * - Returns an integer representing the memory usage. If multiple paths are matched, + * returns the data of the first matching object. If `path` doesn't exist, an error is raised. + * - If `key` doesn't exist, returns `null`. + * + * @example + * ```typescript + * console.log(await GlideJson.set(client, "doc", "$", '[1, 2.3, "foo", true, null, {}, [], {a:1, b:2}, [1, 2, 3]]')); + * // Output: 'OK' - Indicates successful setting of the value at path '$' in the key stored at `doc`. 
+ * console.log(await GlideJson.debugMemory(client, "doc", {path: "$[*]"}); + * // Output: [1, 1, 1, 1, 1, 0, 0, 2, 3] + * ``` + */ + static async debugFields( + client: BaseClient, + key: GlideString, + options?: { path: GlideString }, + ): Promise> { + const args = ["JSON.DEBUG", "FIELDS", key]; + + if (options) { + args.push(options.path); + } + + return _executeCommand(client, args); + } + + /** + * Increments or decrements the JSON value(s) at the specified `path` by `number` within the JSON document stored at `key`. + * + * @param client - The client to execute the command. + * @param key - The key of the JSON document. + * @param path - The path within the JSON document. + * @param num - The number to increment or decrement by. + * @returns + * - For JSONPath (path starts with `$`): + * - Returns a string representation of an array of strings, indicating the new values after incrementing for each matched `path`. + * If a value is not a number, its corresponding return value will be `null`. + * If `path` doesn't exist, a byte string representation of an empty array will be returned. + * - For legacy path (path doesn't start with `$`): + * - Returns a string representation of the resulting value after the increment or decrement. + * If multiple paths match, the result of the last updated value is returned. + * If the value at the `path` is not a number or `path` doesn't exist, an error is raised. + * - If `key` does not exist, an error is raised. + * - If the result is out of the range of 64-bit IEEE double, an error is raised. + * + * @example + * ```typescript + * console.log(await GlideJson.set(client, "doc", "$", '{"a": [], "b": [1], "c": [1, 2], "d": [1, 2, 3]}')); + * // Output: 'OK' - Indicates successful setting of the value at path '$' in the key stored at `doc`. + * console.log(await GlideJson.numincrby(client, "doc", "$.d[*]", 10)) + * // Output: '[11,12,13]' - Increment each element in `d` array by 10. 
+ * + * console.log(await GlideJson.numincrby(client, "doc", ".c[1]", 10)); + * // Output: '12' - Increment the second element in the `c` array by 10. + * ``` + */ + static async numincrby( + client: BaseClient, + key: GlideString, + path: GlideString, + num: number, + ): Promise { + const args = ["JSON.NUMINCRBY", key, path, num.toString()]; + return _executeCommand(client, args); + } + + /** + * Multiplies the JSON value(s) at the specified `path` by `number` within the JSON document stored at `key`. + * + * @param client - The client to execute the command. + * @param key - The key of the JSON document. + * @param path - The path within the JSON document. + * @param num - The number to multiply by. + * @returns + * - For JSONPath (path starts with `$`): + * - Returns a GlideString representation of an array of strings, indicating the new values after multiplication for each matched `path`. + * If a value is not a number, its corresponding return value will be `null`. + * If `path` doesn't exist, a byte string representation of an empty array will be returned. + * - For legacy path (path doesn't start with `$`): + * - Returns a GlideString representation of the resulting value after multiplication. + * If multiple paths match, the result of the last updated value is returned. + * If the value at the `path` is not a number or `path` doesn't exist, an error is raised. + * - If `key` does not exist, an error is raised. + * - If the result is out of the range of 64-bit IEEE double, an error is raised. + * + * @example + * ```typescript + * console.log(await GlideJson.set(client, "doc", "$", '{"a": [], "b": [1], "c": [1, 2], "d": [1, 2, 3]}')); + * // Output: 'OK' - Indicates successful setting of the value at path '$' in the key stored at `doc`. + * console.log(await GlideJson.nummultby(client, "doc", "$.d[*]", 2)) + * // Output: '[2,4,6]' - Multiplies each element in the `d` array by 2. 
+ * + * console.log(await GlideJson.nummultby(client, "doc", ".c[1]", 2)); + * // Output: '4' - Multiplies the second element in the `c` array by 2. + * ``` + */ + static async nummultby( + client: BaseClient, + key: GlideString, + path: GlideString, + num: number, + ): Promise { + const args = ["JSON.NUMMULTBY", key, path, num.toString()]; + return _executeCommand(client, args); + } + + /** + * Retrieves the number of key-value pairs in the object stored at the specified `path` within the JSON document stored at `key`. + * + * @param client - The client to execute the command. + * @param key - The key of the JSON document. + * @param options - (Optional) Additional parameters: + * - (Optional) `path`: The path within the JSON document, Defaults to root (`"."`) if not provided. + * @returns ReturnTypeJson: + * - For JSONPath (`path` starts with `$`): + * - Returns a list of integer replies for every possible path, indicating the length of the object, + * or `null` for JSON values matching the path that are not an object. + * - If `path` doesn't exist, an empty array will be returned. + * - For legacy path (`path` doesn't starts with `$`): + * - Returns the length of the object at `path`. + * - If multiple paths match, the length of the first object match is returned. + * - If the JSON value at `path` is not an object or if `path` doesn't exist, an error is raised. + * - If `key` doesn't exist, `null` is returned. + * + * @example + * ```typescript + * console.log(await GlideJson.set(client, "doc", "$", '{"a": 1.0, "b": {"a": {"x": 1, "y": 2}, "b": 2.5, "c": true}}')); + * // Output: 'OK' - Indicates successful setting of the value at the root path '$' in the key `doc`. + * console.log(await GlideJson.objlen(client, "doc", { path: "$" })); + * // Output: [2] - Returns the number of key-value pairs at the root object, which has 2 keys: 'a' and 'b'. + * console.log(await GlideJson.objlen(client, "doc", { path: "." 
})); + * // Output: 2 - Returns the number of key-value pairs for the object matching the path '.', which has 2 keys: 'a' and 'b'. + * ``` + */ + static async objlen( + client: BaseClient, + key: GlideString, + options?: { path: GlideString }, + ): Promise> { + const args = ["JSON.OBJLEN", key]; + + if (options) { + args.push(options.path); + } + + return _executeCommand>(client, args); + } + + /** + * Retrieves key names in the object values at the specified `path` within the JSON document stored at `key`. + * + * @param client - The client to execute the command. + * @param key - The key of the JSON document. + * @param options - (Optional) Additional parameters: + * - (Optional) `path`: The path within the JSON document where the key names will be retrieved. Defaults to root (`"."`) if not provided. + * @returns ReturnTypeJson: + * - For JSONPath (`path` starts with `$`): + * - Returns a list of arrays containing key names for each matching object. + * - If a value matching the path is not an object, an empty array is returned. + * - If `path` doesn't exist, an empty array is returned. + * - For legacy path (`path` starts with `.`): + * - Returns a list of key names for the object value matching the path. + * - If multiple objects match the path, the key names of the first object is returned. + * - If a value matching the path is not an object, an error is raised. + * - If `path` doesn't exist, `null` is returned. + * - If `key` doesn't exist, `null` is returned. + * + * @example + * ```typescript + * console.log(await GlideJson.set(client, "doc", "$", '{"a": 1.0, "b": {"a": {"x": 1, "y": 2}, "b": 2.5, "c": true}}')); + * // Output: 'OK' - Indicates successful setting of the value at the root path '$' in the key `doc`. + * console.log(await GlideJson.objkeys(client, "doc", { path: "$" })); + * // Output: [["a", "b"]] - Returns a list of arrays containing the key names for objects matching the path '$'. 
+ * console.log(await GlideJson.objkeys(client, "doc", { path: "." })); + * // Output: ["a", "b"] - Returns key names for the object matching the path '.' as it is the only match. + * ``` + */ + static async objkeys( + client: BaseClient, + key: GlideString, + options?: { path: GlideString } & DecoderOption, + ): Promise> { + const args = ["JSON.OBJKEYS", key]; + + if (options) { + args.push(options.path); + } + + return _executeCommand(client, args, options); + } } diff --git a/node/tests/GlideClient.test.ts b/node/tests/GlideClient.test.ts index e1043c0657..61187615d5 100644 --- a/node/tests/GlideClient.test.ts +++ b/node/tests/GlideClient.test.ts @@ -13,7 +13,6 @@ import { import { BufferReader, BufferWriter } from "protobufjs"; import { v4 as uuidv4 } from "uuid"; import { - convertGlideRecordToRecord, Decoder, FlushMode, FunctionRestorePolicy, @@ -25,28 +24,27 @@ import { RequestError, Script, Transaction, + convertGlideRecordToRecord, } from ".."; import { ValkeyCluster } from "../../utils/TestUtils.js"; import { command_request } from "../src/ProtobufMessage"; import { runBaseTests } from "./SharedTests"; import { + DumpAndRestoreTest, checkFunctionListResponse, checkFunctionStatsResponse, convertStringArrayToBuffer, createLongRunningLuaScript, createLuaLibWithLongRunningFunction, - DumpAndRestoreTest, encodableTransactionTest, flushAndCloseClient, generateLuaLibCode, getClientConfigurationOption, getServerVersion, - parseCommandLineArgs, parseEndpoints, transactionTest, validateTransactionResponse, waitForNotBusy, - waitForScriptNotBusy, } from "./TestUtilities"; const TIMEOUT = 50000; @@ -56,8 +54,7 @@ describe("GlideClient", () => { let cluster: ValkeyCluster; let client: GlideClient; beforeAll(async () => { - const standaloneAddresses = - parseCommandLineArgs()["standalone-endpoints"]; + const standaloneAddresses = global.STAND_ALONE_ENDPOINT; cluster = standaloneAddresses ? 
await ValkeyCluster.initFromExistingCluster( false, @@ -1462,70 +1459,6 @@ describe("GlideClient", () => { TIMEOUT, ); - it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( - "script kill killable test_%p", - async (protocol) => { - const config = getClientConfigurationOption( - cluster.getAddresses(), - protocol, - { requestTimeout: 10000 }, - ); - const client1 = await GlideClient.createClient(config); - const client2 = await GlideClient.createClient(config); - - try { - // Verify that script kill raises an error when no script is running - await expect(client1.scriptKill()).rejects.toThrow( - "No scripts in execution right now", - ); - - // Create a long-running script - const longScript = new Script( - createLongRunningLuaScript(5, false), - ); - - try { - // call the script without await - const promise = client2 - .invokeScript(longScript) - .catch((e) => - expect((e as Error).message).toContain( - "Script killed", - ), - ); - - let killed = false; - let timeout = 4000; - await new Promise((resolve) => setTimeout(resolve, 1000)); - - while (timeout >= 0) { - try { - expect(await client1.scriptKill()).toEqual("OK"); - killed = true; - break; - } catch { - // do nothing - } - - await new Promise((resolve) => - setTimeout(resolve, 500), - ); - timeout -= 500; - } - - expect(killed).toBeTruthy(); - await promise; - } finally { - await waitForScriptNotBusy(client1); - } - } finally { - expect(await client1.scriptFlush()).toEqual("OK"); - client1.close(); - client2.close(); - } - }, - ); - it.each([ [ProtocolVersion.RESP2, 5], [ProtocolVersion.RESP2, 100], diff --git a/node/tests/GlideClusterClient.test.ts b/node/tests/GlideClusterClient.test.ts index 75ac0e6f1d..c8782ffbc7 100644 --- a/node/tests/GlideClusterClient.test.ts +++ b/node/tests/GlideClusterClient.test.ts @@ -15,7 +15,6 @@ import { v4 as uuidv4 } from "uuid"; import { BitwiseOperation, ClusterTransaction, - convertRecordToGlideRecord, Decoder, FlushMode, FunctionListResponse, @@ -34,6 +33,7 @@ 
import { Script, SlotKeyTypes, SortOrder, + convertRecordToGlideRecord, } from ".."; import { ValkeyCluster } from "../../utils/TestUtils.js"; import { runBaseTests } from "./SharedTests"; @@ -50,12 +50,10 @@ import { getServerVersion, intoArray, intoString, - parseCommandLineArgs, parseEndpoints, transactionTest, validateTransactionResponse, waitForNotBusy, - waitForScriptNotBusy, } from "./TestUtilities"; const TIMEOUT = 50000; @@ -65,7 +63,7 @@ describe("GlideClusterClient", () => { let cluster: ValkeyCluster; let client: GlideClusterClient; beforeAll(async () => { - const clusterAddresses = parseCommandLineArgs()["cluster-endpoints"]; + const clusterAddresses = global.CLUSTER_ENDPOINTS; // Connect to cluster or create a new one based on the parsed addresses cluster = clusterAddresses ? await ValkeyCluster.initFromExistingCluster( @@ -248,7 +246,7 @@ describe("GlideClusterClient", () => { expect(await client.set(key, value)).toEqual("OK"); // Since DUMP gets binary results, we cannot use the default decoder (string) here, so we expected to get an error. 
await expect(client.customCommand(["DUMP", key])).rejects.toThrow( - "invalid utf-8 sequence of 1 bytes from index", + "invalid utf-8 sequence", ); const dumpResult = await client.customCommand(["DUMP", key], { @@ -1930,71 +1928,6 @@ describe("GlideClusterClient", () => { TIMEOUT, ); - it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( - "script kill killable test_%p", - async (protocol) => { - const config = getClientConfigurationOption( - cluster.getAddresses(), - protocol, - { requestTimeout: 10000 }, - ); - const client1 = await GlideClusterClient.createClient(config); - const client2 = await GlideClusterClient.createClient(config); - - try { - // Verify that script kill raises an error when no script is running - await expect(client1.scriptKill()).rejects.toThrow( - "No scripts in execution right now", - ); - - // Create a long-running script - const longScript = new Script( - createLongRunningLuaScript(5, false), - ); - - try { - // call the script without await - const promise = client2 - .invokeScript(longScript) - .catch((e) => - expect((e as Error).message).toContain( - "Script killed", - ), - ); - - let killed = false; - let timeout = 4000; - await new Promise((resolve) => setTimeout(resolve, 1000)); - - while (timeout >= 0) { - try { - expect(await client1.scriptKill()).toEqual("OK"); - killed = true; - break; - } catch { - // do nothing - } - - await new Promise((resolve) => - setTimeout(resolve, 500), - ); - timeout -= 500; - } - - expect(killed).toBeTruthy(); - await promise; - } finally { - await waitForScriptNotBusy(client1); - } - } finally { - expect(await client1.scriptFlush()).toEqual("OK"); - client1.close(); - client2.close(); - } - }, - TIMEOUT, - ); - it.each([ [ProtocolVersion.RESP2, 5], [ProtocolVersion.RESP2, 100], diff --git a/node/tests/PubSub.test.ts b/node/tests/PubSub.test.ts index 8e87b7ee1c..9463f11b8f 100644 --- a/node/tests/PubSub.test.ts +++ b/node/tests/PubSub.test.ts @@ -28,7 +28,6 @@ import ValkeyCluster from 
"../../utils/TestUtils"; import { flushAndCloseClient, getServerVersion, - parseCommandLineArgs, parseEndpoints, } from "./TestUtilities"; @@ -60,9 +59,8 @@ describe("PubSub", () => { let cmeCluster: ValkeyCluster; let cmdCluster: ValkeyCluster; beforeAll(async () => { - const standaloneAddresses = - parseCommandLineArgs()["standalone-endpoints"]; - const clusterAddresses = parseCommandLineArgs()["cluster-endpoints"]; + const standaloneAddresses = global.STAND_ALONE_ENDPOINT; + const clusterAddresses = global.CLUSTER_ENDPOINTS; // Connect to cluster or create a new one based on the parsed addresses cmdCluster = standaloneAddresses ? await ValkeyCluster.initFromExistingCluster( diff --git a/node/tests/ScanTest.test.ts b/node/tests/ScanTest.test.ts index 5c975cacdc..bff90bab36 100644 --- a/node/tests/ScanTest.test.ts +++ b/node/tests/ScanTest.test.ts @@ -18,7 +18,6 @@ import { flushAndCloseClient, getClientConfigurationOption, getServerVersion, - parseCommandLineArgs, parseEndpoints, } from "./TestUtilities"; @@ -30,7 +29,7 @@ describe("Scan GlideClusterClient", () => { let cluster: ValkeyCluster; let client: GlideClusterClient; beforeAll(async () => { - const clusterAddresses = parseCommandLineArgs()["cluster-endpoints"]; + const clusterAddresses = global.CLUSTER_ENDPOINTS; // Connect to cluster or create a new one based on the parsed addresses cluster = clusterAddresses ? await ValkeyCluster.initFromExistingCluster( @@ -385,8 +384,7 @@ describe("Scan GlideClient", () => { let cluster: ValkeyCluster; let client: GlideClient; beforeAll(async () => { - const standaloneAddresses = - parseCommandLineArgs()["standalone-endpoints"]; + const standaloneAddresses = global.STAND_ALONE_ENDPOINT; cluster = standaloneAddresses ? 
await ValkeyCluster.initFromExistingCluster( false, diff --git a/node/tests/ServerModules.test.ts b/node/tests/ServerModules.test.ts index 9d8c0b77a0..35b2d82487 100644 --- a/node/tests/ServerModules.test.ts +++ b/node/tests/ServerModules.test.ts @@ -12,7 +12,9 @@ import { import { v4 as uuidv4 } from "uuid"; import { ConditionalChange, + convertGlideRecordToRecord, Decoder, + FtAggregateOptions, FtSearchReturnType, GlideClusterClient, GlideFt, @@ -21,6 +23,7 @@ import { JsonGetOptions, ProtocolVersion, RequestError, + SortOrder, VectorField, } from ".."; import { ValkeyCluster } from "../../utils/TestUtils"; @@ -28,18 +31,18 @@ import { flushAndCloseClient, getClientConfigurationOption, getServerVersion, - parseCommandLineArgs, parseEndpoints, } from "./TestUtilities"; const TIMEOUT = 50000; +/** Waiting interval to let server process the data before querying */ const DATA_PROCESSING_TIMEOUT = 1000; describe("Server Module Tests", () => { let cluster: ValkeyCluster; beforeAll(async () => { - const clusterAddresses = parseCommandLineArgs()["cluster-endpoints"]; + const clusterAddresses = global.CLUSTER_ENDPOINTS; cluster = await ValkeyCluster.initFromExistingCluster( true, parseEndpoints(clusterAddresses), @@ -245,6 +248,15 @@ describe("Server Module Tests", () => { const expectedResult2 = '[\n~{\n~~"a":*1,\n~~"b":*2,\n~~"c":*{\n~~~"d":*3,\n~~~"e":*4\n~~}\n~}\n]'; expect(result).toEqual(expectedResult2); + + // binary buffer test + const result3 = await GlideJson.get(client, Buffer.from(key), { + path: Buffer.from("$"), + indent: Buffer.from("~"), + newline: Buffer.from("\n"), + space: Buffer.from("*"), + } as JsonGetOptions); + expect(result3).toEqual(expectedResult2); }); it("json.arrinsert", async () => { @@ -351,6 +363,16 @@ describe("Server Module Tests", () => { expect( JSON.parse((await GlideJson.get(client, key)) as string), ).toEqual(expected); + + // Binary buffer test + expect( + JSON.parse( + (await GlideJson.get( + client, + Buffer.from(key), + )) as 
string, + ), + ).toEqual(expected); }); it("json.arrpop", async () => { @@ -401,6 +423,11 @@ describe("Server Module Tests", () => { "OK", ); expect(await GlideJson.arrpop(client, key)).toEqual("42"); + + // Binary buffer test + expect( + await GlideJson.arrpop(client, Buffer.from(key)), + ).toEqual("[3,4]"); }); it("json.arrlen", async () => { @@ -439,6 +466,175 @@ describe("Server Module Tests", () => { await GlideJson.set(client, key, "$", "[1, 2, 3, 4]"), ).toBe("OK"); expect(await GlideJson.arrlen(client, key)).toEqual(4); + + // Binary buffer test + expect( + await GlideJson.arrlen(client, Buffer.from(key)), + ).toEqual(4); + }); + + it("json.arrindex", async () => { + client = await GlideClusterClient.createClient( + getClientConfigurationOption( + cluster.getAddresses(), + protocol, + ), + ); + + const key1 = uuidv4(); + const key2 = uuidv4(); + const doc1 = + '{"a": [1, 3, true, "hello"], "b": {"a": [3, 4, [3, false], 5], "c": {"a": 42}}}'; + + expect(await GlideJson.set(client, key1, "$", doc1)).toBe("OK"); + + // Verify scalar type + expect( + await GlideJson.arrindex(client, key1, "$..a", true), + ).toEqual([2, -1, null]); + expect( + await GlideJson.arrindex(client, key1, "..a", true), + ).toEqual(2); + + expect( + await GlideJson.arrindex(client, key1, "$..a", 3), + ).toEqual([1, 0, null]); + expect( + await GlideJson.arrindex(client, key1, "..a", 3), + ).toEqual(1); + + expect( + await GlideJson.arrindex(client, key1, "$..a", '"hello"'), + ).toEqual([3, -1, null]); + expect( + await GlideJson.arrindex(client, key1, "..a", '"hello"'), + ).toEqual(3); + + expect( + await GlideJson.arrindex(client, key1, "$..a", null), + ).toEqual([-1, -1, null]); + expect( + await GlideJson.arrindex(client, key1, "..a", null), + ).toEqual(-1); + + // Value at the path is not an array + expect( + await GlideJson.arrindex(client, key1, "$..c", 42), + ).toEqual([null]); + await expect( + GlideJson.arrindex(client, key1, "..c", 42), + ).rejects.toThrow(RequestError); + + 
const doc2 = + '{"a": [1, 3, true, "foo", "meow", "m", "foo", "lol", false],' + + ' "b": {"a": [3, 4, ["value", 3, false], 5], "c": {"a": 42}}}'; + + expect(await GlideJson.set(client, key2, "$", doc2)).toBe("OK"); + + // Verify optional `start` and `end` + expect( + await GlideJson.arrindex(client, key2, "$..a", '"foo"', { + start: 6, + end: 8, + }), + ).toEqual([6, -1, null]); + expect( + await GlideJson.arrindex(client, key2, "$..a", '"foo"', { + start: 2, + end: 8, + }), + ).toEqual([3, -1, null]); + expect( + await GlideJson.arrindex(client, key2, "..a", '"meow"', { + start: 2, + end: 8, + }), + ).toEqual(4); + + // Verify without optional `end` + expect( + await GlideJson.arrindex(client, key2, "$..a", '"foo"', { + start: 6, + }), + ).toEqual([6, -1, null]); + expect( + await GlideJson.arrindex(client, key2, "..a", '"foo"', { + start: 6, + }), + ).toEqual(6); + + // Verify optional `end` with 0 or -1 (means the last element is included) + expect( + await GlideJson.arrindex(client, key2, "$..a", '"foo"', { + start: 6, + end: 0, + }), + ).toEqual([6, -1, null]); + expect( + await GlideJson.arrindex(client, key2, "..a", '"foo"', { + start: 6, + end: 0, + }), + ).toEqual(6); + expect( + await GlideJson.arrindex(client, key2, "$..a", '"foo"', { + start: 6, + end: -1, + }), + ).toEqual([6, -1, null]); + expect( + await GlideJson.arrindex(client, key2, "..a", '"foo"', { + start: 6, + end: -1, + }), + ).toEqual(6); + + // Test with binary input + expect( + await GlideJson.arrindex( + client, + Buffer.from(key2), + Buffer.from("$..a"), + Buffer.from('"foo"'), + { + start: 6, + end: -1, + }, + ), + ).toEqual([6, -1, null]); + expect( + await GlideJson.arrindex( + client, + Buffer.from(key2), + Buffer.from("..a"), + Buffer.from('"foo"'), + { + start: 6, + end: -1, + }, + ), + ).toEqual(6); + + // Test with non-existent path + expect( + await GlideJson.arrindex( + client, + key2, + "$.nonexistent", + true, + ), + ).toEqual([]); + await expect( + 
GlideJson.arrindex(client, key2, "nonexistent", true), + ).rejects.toThrow(RequestError); + + // Test with non-existent key + await expect( + GlideJson.arrindex(client, "non_existing_key", "$", true), + ).rejects.toThrow(RequestError); + await expect( + GlideJson.arrindex(client, "non_existing_key", ".", true), + ).rejects.toThrow(RequestError); }); it("json.toggle tests", async () => { @@ -494,6 +690,11 @@ describe("Server Module Tests", () => { await expect( GlideJson.toggle(client, "non_existing_key", { path: "$" }), ).rejects.toThrow(RequestError); + + // Binary buffer test + expect(await GlideJson.toggle(client, Buffer.from(key2))).toBe( + false, + ); }); it("json.del tests", async () => { @@ -582,6 +783,9 @@ describe("Server Module Tests", () => { await GlideJson.get(client, key, { path: "$" }), ).toBeNull(); + // Binary buffer test + expect(await GlideJson.del(client, Buffer.from(key))).toBe(0); + // non-existing keys expect( await GlideJson.del(client, "non_existing_key", { @@ -685,6 +889,11 @@ describe("Server Module Tests", () => { await GlideJson.get(client, key, { path: "$" }), ).toBeNull(); + // Binary buffer test + expect(await GlideJson.forget(client, Buffer.from(key))).toBe( + 0, + ); + // non-existing keys expect( await GlideJson.forget(client, "non_existing_key", { @@ -761,8 +970,168 @@ describe("Server Module Tests", () => { expect( await GlideJson.type(client, "non_existing", { path: "." 
}), ).toBeNull(); + + // Binary buffer test + expect( + await GlideJson.type(client, Buffer.from(key2), { + path: Buffer.from(".Age"), + }), + ).toEqual("integer"); }); + it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( + "json.clear tests", + async () => { + client = await GlideClusterClient.createClient( + getClientConfigurationOption( + cluster.getAddresses(), + protocol, + ), + ); + const key = uuidv4(); + const jsonValue = { + obj: { a: 1, b: 2 }, + arr: [1, 2, 3], + str: "foo", + bool: true, + int: 42, + float: 3.14, + nullVal: null, + }; + + expect( + await GlideJson.set( + client, + key, + "$", + JSON.stringify(jsonValue), + ), + ).toBe("OK"); + + expect( + await GlideJson.clear(client, key, { path: "$.*" }), + ).toBe(6); + + const result = await GlideJson.get(client, key, { + path: ["$"], + }); + + expect(JSON.parse(result as string)).toEqual([ + { + obj: {}, + arr: [], + str: "", + bool: false, + int: 0, + float: 0.0, + nullVal: null, + }, + ]); + + expect( + await GlideJson.clear(client, key, { path: "$.*" }), + ).toBe(0); + + expect( + await GlideJson.set( + client, + key, + "$", + JSON.stringify(jsonValue), + ), + ).toBe("OK"); + + expect( + await GlideJson.clear(client, key, { path: "*" }), + ).toBe(6); + + const jsonValue2 = { + a: 1, + b: { a: [5, 6, 7], b: { a: true } }, + c: { a: "value", b: { a: 3.5 } }, + d: { a: { foo: "foo" } }, + nullVal: null, + }; + expect( + await GlideJson.set( + client, + key, + "$", + JSON.stringify(jsonValue2), + ), + ).toBe("OK"); + + expect( + await GlideJson.clear(client, key, { + path: "b.a[1:3]", + }), + ).toBe(2); + + expect( + await GlideJson.clear(client, key, { + path: "b.a[1:3]", + }), + ).toBe(0); + + expect( + JSON.parse( + (await GlideJson.get(client, key, { + path: ["$..a"], + })) as string, + ), + ).toEqual([ + 1, + [5, 0, 0], + true, + "value", + 3.5, + { foo: "foo" }, + ]); + + expect( + await GlideJson.clear(client, key, { path: "..a" }), + ).toBe(6); + + expect( + JSON.parse( + (await 
GlideJson.get(client, key, { + path: ["$..a"], + })) as string, + ), + ).toEqual([0, [], false, "", 0.0, {}]); + + expect( + await GlideJson.clear(client, key, { path: "$..a" }), + ).toBe(0); + + // Path doesn't exist + expect( + await GlideJson.clear(client, key, { path: "$.path" }), + ).toBe(0); + + expect( + await GlideJson.clear(client, key, { path: "path" }), + ).toBe(0); + + // Key doesn't exist + await expect( + GlideJson.clear(client, "non_existing_key"), + ).rejects.toThrow(RequestError); + + await expect( + GlideJson.clear(client, "non_existing_key", { + path: "$", + }), + ).rejects.toThrow(RequestError); + + await expect( + GlideJson.clear(client, "non_existing_key", { + path: ".", + }), + ).rejects.toThrow(RequestError); + }, + ); + it("json.resp tests", async () => { client = await GlideClusterClient.createClient( getClientConfigurationOption( @@ -887,6 +1256,13 @@ describe("Server Module Tests", () => { expect( await GlideJson.resp(client, "nonexistent_key"), ).toBeNull(); + + // binary buffer test + expect( + await GlideJson.resp(client, Buffer.from(key), { + path: Buffer.from("..a"), + }), + ).toEqual(["[", 1, 2, 3]); }); it("json.arrtrim tests", async () => { @@ -1022,75 +1398,685 @@ describe("Server Module Tests", () => { ).toEqual(0); }); - it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( - "json.strlen tests", - async (protocol) => { - client = await GlideClusterClient.createClient( - getClientConfigurationOption( - cluster.getAddresses(), - protocol, - ), - ); - const key = uuidv4(); - const jsonValue = { - a: "foo", - nested: { a: "hello" }, - nested2: { a: 31 }, - }; - // setup - expect( - await GlideJson.set( - client, - key, - "$", - JSON.stringify(jsonValue), - ), - ).toBe("OK"); - - expect( - await GlideJson.strlen(client, key, { path: "$..a" }), - ).toEqual([3, 5, null]); - expect( - await GlideJson.strlen(client, key, { path: "a" }), - ).toBe(3); + it("json.strlen tests", async () => { + client = await 
GlideClusterClient.createClient( + getClientConfigurationOption( + cluster.getAddresses(), + protocol, + ), + ); + const key = uuidv4(); + const jsonValue = { + a: "foo", + nested: { a: "hello" }, + nested2: { a: 31 }, + }; + // setup + expect( + await GlideJson.set( + client, + key, + "$", + JSON.stringify(jsonValue), + ), + ).toBe("OK"); - expect( - await GlideJson.strlen(client, key, { - path: "$.nested", - }), - ).toEqual([null]); - expect( - await GlideJson.strlen(client, key, { path: "$..a" }), - ).toEqual([3, 5, null]); + expect( + await GlideJson.strlen(client, key, { path: "$..a" }), + ).toEqual([3, 5, null]); + expect(await GlideJson.strlen(client, key, { path: "a" })).toBe( + 3, + ); - expect( - await GlideJson.strlen(client, "non_existing_key", { - path: ".", - }), - ).toBeNull(); - expect( - await GlideJson.strlen(client, "non_existing_key", { - path: "$", - }), - ).toBeNull(); - expect( - await GlideJson.strlen(client, key, { - path: "$.non_existing_path", - }), - ).toEqual([]); + expect( + await GlideJson.strlen(client, key, { + path: "$.nested", + }), + ).toEqual([null]); + expect( + await GlideJson.strlen(client, key, { path: "$..a" }), + ).toEqual([3, 5, null]); - // error case - await expect( - GlideJson.strlen(client, key, { path: "nested" }), - ).rejects.toThrow(RequestError); - await expect(GlideJson.strlen(client, key)).rejects.toThrow( - RequestError, + expect( + await GlideJson.strlen(client, "non_existing_key", { + path: ".", + }), + ).toBeNull(); + expect( + await GlideJson.strlen(client, "non_existing_key", { + path: "$", + }), + ).toBeNull(); + expect( + await GlideJson.strlen(client, key, { + path: "$.non_existing_path", + }), + ).toEqual([]); + + // error case + await expect( + GlideJson.strlen(client, key, { path: "nested" }), + ).rejects.toThrow(RequestError); + await expect(GlideJson.strlen(client, key)).rejects.toThrow( + RequestError, + ); + // Binary buffer test + expect( + await GlideJson.strlen(client, Buffer.from(key), { + 
path: Buffer.from("$..a"), + }), + ).toEqual([3, 5, null]); + }); + + it("json.arrappend", async () => { + client = await GlideClusterClient.createClient( + getClientConfigurationOption( + cluster.getAddresses(), + protocol, + ), + ); + const key = uuidv4(); + let doc = { a: 1, b: ["one", "two"] }; + expect( + await GlideJson.set(client, key, "$", JSON.stringify(doc)), + ).toBe("OK"); + + expect( + await GlideJson.arrappend(client, key, Buffer.from("$.b"), [ + '"three"', + ]), + ).toEqual([3]); + expect( + await GlideJson.arrappend(client, key, ".b", [ + '"four"', + '"five"', + ]), + ).toEqual(5); + doc = JSON.parse( + (await GlideJson.get(client, key, { path: "." })) as string, + ); + expect(doc).toEqual({ + a: 1, + b: ["one", "two", "three", "four", "five"], + }); + + expect( + await GlideJson.arrappend(client, key, "$.a", ['"value"']), + ).toEqual([null]); + }); + + it("json.strappend tests", async () => { + client = await GlideClusterClient.createClient( + getClientConfigurationOption( + cluster.getAddresses(), + protocol, + ), + ); + const key = uuidv4(); + const jsonValue = { + a: "foo", + nested: { a: "hello" }, + nested2: { a: 31 }, + }; + // setup + expect( + await GlideJson.set( + client, + key, + "$", + JSON.stringify(jsonValue), + ), + ).toBe("OK"); + + expect( + await GlideJson.strappend(client, key, '"bar"', { + path: "$..a", + }), + ).toEqual([6, 8, null]); + expect( + await GlideJson.strappend( + client, + key, + JSON.stringify("foo"), + { + path: "a", + }, + ), + ).toBe(9); + + expect(await GlideJson.get(client, key, { path: "." 
})).toEqual( + JSON.stringify({ + a: "foobarfoo", + nested: { a: "hellobar" }, + nested2: { a: 31 }, + }), + ); + + // Binary buffer test + expect( + await GlideJson.strappend( + client, + Buffer.from(key), + Buffer.from(JSON.stringify("foo")), + { + path: Buffer.from("a"), + }, + ), + ).toBe(12); + + expect( + await GlideJson.strappend( + client, + key, + JSON.stringify("bar"), + { + path: "$.nested", + }, + ), + ).toEqual([null]); + + await expect( + GlideJson.strappend(client, key, JSON.stringify("bar"), { + path: ".nested", + }), + ).rejects.toThrow(RequestError); + await expect( + GlideJson.strappend(client, key, JSON.stringify("bar")), + ).rejects.toThrow(RequestError); + + expect( + await GlideJson.strappend( + client, + key, + JSON.stringify("try"), + { + path: "$.non_existing_path", + }, + ), + ).toEqual([]); + + await expect( + GlideJson.strappend(client, key, JSON.stringify("try"), { + path: ".non_existing_path", + }), + ).rejects.toThrow(RequestError); + await expect( + GlideJson.strappend( + client, + "non_existing_key", + JSON.stringify("try"), + ), + ).rejects.toThrow(RequestError); + }); + + it("json.numincrby tests", async () => { + client = await GlideClusterClient.createClient( + getClientConfigurationOption( + cluster.getAddresses(), + protocol, + ), + ); + const key = uuidv4(); + const jsonValue = { + key1: 1, + key2: 3.5, + key3: { nested_key: { key1: [4, 5] } }, + key4: [1, 2, 3], + key5: 0, + key6: "hello", + key7: null, + key8: { nested_key: { key1: 69 } }, + key9: 1.7976931348623157e308, + }; + // setup + expect( + await GlideJson.set( + client, + key, + "$", + JSON.stringify(jsonValue), + ), + ).toBe("OK"); + + // Increment integer value (key1) by 5 + expect( + await GlideJson.numincrby(client, key, "$.key1", 5), + ).toBe("[6]"); // 1 + 5 = 6 + + // Increment float value (key2) by 2.5 + expect( + await GlideJson.numincrby(client, key, "$.key2", 2.5), + ).toBe("[6]"); // 3.5 + 2.5 = 6 + + // Increment nested object 
(key3.nested_key.key1[0]) by 7 + expect( + await GlideJson.numincrby( + client, + key, + "$.key3.nested_key.key1[1]", + 7, + ), + ).toBe("[12]"); // 4 + 7 = 12 + + // Increment array element (key4[1]) by 1 + expect( + await GlideJson.numincrby(client, key, "$.key4[1]", 1), + ).toBe("[3]"); // 2 + 1 = 3 + + // Increment zero value (key5) by 10.23 (float number) + expect( + await GlideJson.numincrby(client, key, "$.key5", 10.23), + ).toBe("[10.23]"); // 0 + 10.23 = 10.23 + + // Increment a string value (key6) by a number + expect( + await GlideJson.numincrby(client, key, "$.key6", 99), + ).toBe("[null]"); // null + + // Increment a None value (key7) by a number + expect( + await GlideJson.numincrby(client, key, "$.key7", 51), + ).toBe("[null]"); // null + + // Check increment for all numbers in the document using JSON Path (First Null: key3 as an entire object. Second Null: The path checks under key3, which is an object, for numeric values). + expect(await GlideJson.numincrby(client, key, "$..*", 5)).toBe( + "[11,11,null,null,15.23,null,null,null,1.7976931348623157e+308,null,null,9,17,6,8,8,null,74]", + ); + + // Check for multiple path match in enhanced + expect( + await GlideJson.numincrby(client, key, "$..key1", 1), + ).toBe("[12,null,75]"); + + // Check for non existent path in JSONPath + expect( + await GlideJson.numincrby(client, key, "$.key10", 51), + ).toBe("[]"); // empty array + + // Check for non existent key in JSONPath + await expect( + GlideJson.numincrby( + client, + "non_existing_key", + "$.key10", + 51, + ), + ).rejects.toThrow(RequestError); + + // Check for Overflow in JSONPath + await expect( + GlideJson.numincrby( + client, + key, + "$.key9", + 1.7976931348623157e308, + ), + ).rejects.toThrow(RequestError); + + // Decrement integer value (key1) by 12 + expect( + await GlideJson.numincrby(client, key, "$.key1", -12), + ).toBe("[0]"); // 12 - 12 = 0 + + // Decrement integer value (key1) by 0.5 + expect( + await GlideJson.numincrby(client, key, 
"$.key1", -0.5), + ).toBe("[-0.5]"); // 0 - 0.5 = -0.5 + + // Test Legacy Path + // Increment float value (key1) by 5 (integer) + expect(await GlideJson.numincrby(client, key, "key1", 5)).toBe( + "4.5", + ); // -0.5 + 5 = 4.5 + + // Decrement float value (key1) by 5.5 (integer) + expect( + await GlideJson.numincrby(client, key, "key1", -5.5), + ).toBe("-1"); // 4.5 - 5.5 = -1 + + // Increment int value (key2) by 2.5 (a float number) + expect( + await GlideJson.numincrby(client, key, "key2", 2.5), + ).toBe("13.5"); // 11 + 2.5 = 13.5 + + // Increment nested value (key3.nested_key.key1[0]) by 7 + expect( + await GlideJson.numincrby( + client, + key, + "key3.nested_key.key1[0]", + 7, + ), + ).toBe("16"); // 9 + 7 = 16 + + // Increment array element (key4[1]) by 1 + expect( + await GlideJson.numincrby(client, key, "key4[1]", 1), + ).toBe("9"); // 8 + 1 = 9 + + // Increment a float value (key5) by 10.2 (a float number) + expect( + await GlideJson.numincrby(client, key, "key5", 10.2), + ).toBe("25.43"); // 15.23 + 10.2 = 25.43 + + // Check for multiple path match in legacy and assure that the result of the last updated value is returned + expect( + await GlideJson.numincrby(client, key, "..key1", 1), + ).toBe("76"); + + // Check if the rest of the key1 path matches were updated and not only the last value + expect( + await GlideJson.get(client, key, { path: "$..key1" }), + ).toBe("[0,[16,17],76]"); + // First is 0 as 0 + 0 = 0, Second doesn't change as its an array type (non-numeric), third is 76 as 0 + 76 = 0 + + // Check for non existent path in legacy + await expect( + GlideJson.numincrby(client, key, ".key10", 51), + ).rejects.toThrow(RequestError); + + // Check for non existent key in legacy + await expect( + GlideJson.numincrby( + client, + "non_existent_key", + ".key10", + 51, + ), + ).rejects.toThrow(RequestError); + + // Check for Overflow in legacy + await expect( + GlideJson.numincrby( + client, + key, + ".key9", + 1.7976931348623157e308, + ), + 
).rejects.toThrow(RequestError); + + // binary buffer test + expect( + await GlideJson.numincrby( + client, + Buffer.from(key), + Buffer.from("key5"), + 1, + ), + ).toBe("26.43"); + }); + + it("json.nummultiby tests", async () => { + client = await GlideClusterClient.createClient( + getClientConfigurationOption( + cluster.getAddresses(), + protocol, + ), + ); + const key = uuidv4(); + const jsonValue = + "{" + + ' "key1": 1,' + + ' "key2": 3.5,' + + ' "key3": {"nested_key": {"key1": [4, 5]}},' + + ' "key4": [1, 2, 3],' + + ' "key5": 0,' + + ' "key6": "hello",' + + ' "key7": null,' + + ' "key8": {"nested_key": {"key1": 69}},' + + ' "key9": 3.5953862697246314e307' + + "}"; + // setup + expect(await GlideJson.set(client, key, "$", jsonValue)).toBe( + "OK", + ); + + // Test JSONPath + // Multiply integer value (key1) by 5 + expect( + await GlideJson.nummultby(client, key, "$.key1", 5), + ).toBe("[5]"); // 1 * 5 = 5 + + // Multiply float value (key2) by 2.5 + expect( + await GlideJson.nummultby(client, key, "$.key2", 2.5), + ).toBe("[8.75]"); // 3.5 * 2.5 = 8.75 + + // Multiply nested object (key3.nested_key.key1[1]) by 7 + expect( + await GlideJson.nummultby( + client, + key, + "$.key3.nested_key.key1[1]", + 7, + ), + ).toBe("[35]"); // 5 * 7 = 5 + + // Multiply array element (key4[1]) by 1 + expect( + await GlideJson.nummultby(client, key, "$.key4[1]", 1), + ).toBe("[2]"); // 2 * 1 = 2 + + // Multiply zero value (key5) by 10.23 (float number) + expect( + await GlideJson.nummultby(client, key, "$.key5", 10.23), + ).toBe("[0]"); // 0 * 10.23 = 0 + + // Multiply a string value (key6) by a number + expect( + await GlideJson.nummultby(client, key, "$.key6", 99), + ).toBe("[null]"); + + // Multiply a None value (key7) by a number + expect( + await GlideJson.nummultby(client, key, "$.key7", 51), + ).toBe("[null]"); + + // Check multiplication for all numbers in the document using JSON Path + // key1: 5 * 5 = 25 + // key2: 8.75 * 5 = 43.75 + // key3.nested_key.key1[0]: 4 * 5 
= 20 + // key3.nested_key.key1[1]: 35 * 5 = 175 + // key4[0]: 1 * 5 = 5 + // key4[1]: 2 * 5 = 10 + // key4[2]: 3 * 5 = 15 + // key5: 0 * 5 = 0 + // key8.nested_key.key1: 69 * 5 = 345 + // key9: 3.5953862697246314e307 * 5 = 1.7976931348623157e308 + expect(await GlideJson.nummultby(client, key, "$..*", 5)).toBe( + "[25,43.75,null,null,0,null,null,null,1.7976931348623157e+308,null,null,20,175,5,10,15,null,345]", + ); + + // Check for multiple path matches in JSONPath + // key1: 25 * 2 = 50 + // key8.nested_key.key1: 345 * 2 = 690 + expect( + await GlideJson.nummultby(client, key, "$..key1", 2), + ).toBe("[50,null,690]"); // After previous multiplications + + // Check for non-existent path in JSONPath + expect( + await GlideJson.nummultby(client, key, "$.key10", 51), + ).toBe("[]"); // Empty Array + + // Check for non-existent key in JSONPath + await expect( + GlideJson.numincrby( + client, + "non_existent_key", + "$.key10", + 51, + ), + ).rejects.toThrow(RequestError); + + // Check for Overflow in JSONPath + await expect( + GlideJson.numincrby( + client, + key, + "$.key9", + 1.7976931348623157e308, + ), + ).rejects.toThrow(RequestError); + + // Multiply integer value (key1) by -12 + expect( + await GlideJson.nummultby(client, key, "$.key1", -12), + ).toBe("[-600]"); // 50 * -12 = -600 + + // Multiply integer value (key1) by -0.5 + expect( + await GlideJson.nummultby(client, key, "$.key1", -0.5), + ).toBe("[300]"); // -600 * -0.5 = 300 + + // Test Legacy Path + // Multiply int value (key1) by 5 (integer) + expect(await GlideJson.nummultby(client, key, "key1", 5)).toBe( + "1500", + ); // 300 * 5 = -1500 + + // Multiply int value (key1) by -5.5 (float number) + expect( + await GlideJson.nummultby(client, key, "key1", -5.5), + ).toBe("-8250"); // -150 * -5.5 = -8250 + + // Multiply int float (key2) by 2.5 (a float number) + expect( + await GlideJson.nummultby(client, key, "key2", 2.5), + ).toBe("109.375"); // 109.375 + + // Multiply nested value (key3.nested_key.key1[0]) 
by 7 + expect( + await GlideJson.nummultby( + client, + key, + "key3.nested_key.key1[0]", + 7, + ), + ).toBe("140"); // 20 * 7 = 140 + + // Multiply array element (key4[1]) by 1 + expect( + await GlideJson.nummultby(client, key, "key4[1]", 1), + ).toBe("10"); // 10 * 1 = 10 + + // Multiply a float value (key5) by 10.2 (a float number) + expect( + await GlideJson.nummultby(client, key, "key5", 10.2), + ).toBe("0"); // 0 * 10.2 = 0 + + // Check for multiple path matches in legacy and assure that the result of the last updated value is returned + // last updated value is key8.nested_key.key1: 690 * 2 = 1380 + expect( + await GlideJson.nummultby(client, key, "..key1", 2), + ).toBe("1380"); // the last updated key1 value multiplied by 2 + + // Check if the rest of the key1 path matches were updated and not only the last value + expect( + await GlideJson.get(client, key, { path: "$..key1" }), + ).toBe("[-16500,[140,175],1380]"); + + // Check for non-existent path in legacy + await expect( + GlideJson.numincrby(client, key, ".key10", 51), + ).rejects.toThrow(RequestError); + + // Check for non-existent key in legacy + await expect( + GlideJson.numincrby( + client, + "non_existent_key", + ".key10", + 51, + ), + ).rejects.toThrow(RequestError); + + // Check for Overflow in legacy + await expect( + GlideJson.numincrby( + client, + key, + ".key9", + 1.7976931348623157e308, + ), + ).rejects.toThrow(RequestError); + + // binary buffer tests + expect( + await GlideJson.nummultby( + client, + Buffer.from(key), + Buffer.from("key5"), + 10.2, + ), + ).toBe("0"); // 0 * 10.2 = 0 + }); + + it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( + "json.debug tests", + async (protocol) => { + client = await GlideClusterClient.createClient( + getClientConfigurationOption( + cluster.getAddresses(), + protocol, + ), ); + const key = uuidv4(); + const jsonValue = + '{ "key1": 1, "key2": 3.5, "key3": {"nested_key": {"key1": [4, 5]}}, "key4":' + + ' [1, 2, 3], "key5": 0, "key6": "hello", 
"key7": null, "key8":' + + ' {"nested_key": {"key1": 3.5953862697246314e307}}, "key9":' + + ' 3.5953862697246314e307, "key10": true }'; + // setup + expect( + await GlideJson.set(client, key, "$", jsonValue), + ).toBe("OK"); + + expect( + await GlideJson.debugFields(client, key, { + path: "$.key1", + }), + ).toEqual([1]); + + expect( + await GlideJson.debugFields(client, key, { + path: "$.key3.nested_key.key1", + }), + ).toEqual([2]); + + expect( + await GlideJson.debugMemory(client, key, { + path: "$.key4[2]", + }), + ).toEqual([16]); + + expect( + await GlideJson.debugMemory(client, key, { + path: ".key6", + }), + ).toEqual(16); + + expect(await GlideJson.debugMemory(client, key)).toEqual( + 504, + ); + + expect(await GlideJson.debugFields(client, key)).toEqual( + 19, + ); + + // testing binary input + expect( + await GlideJson.debugMemory(client, Buffer.from(key)), + ).toEqual(504); + + expect( + await GlideJson.debugFields(client, Buffer.from(key)), + ).toEqual(19); }, ); it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( - "json.strappend tests", + "json.objlen tests", async (protocol) => { client = await GlideClusterClient.createClient( getClientConfigurationOption( @@ -1100,10 +2086,10 @@ describe("Server Module Tests", () => { ); const key = uuidv4(); const jsonValue = { - a: "foo", - nested: { a: "hello" }, - nested2: { a: 31 }, + a: 1.0, + b: { a: { x: 1, y: 2 }, b: 2.5, c: true }, }; + // setup expect( await GlideJson.set( @@ -1115,84 +2101,178 @@ describe("Server Module Tests", () => { ).toBe("OK"); expect( - await GlideJson.strappend(client, key, '"bar"', { - path: "$..a", + await GlideJson.objlen(client, key, { path: "$" }), + ).toEqual([2]); + + expect( + await GlideJson.objlen(client, key, { path: "." }), + ).toEqual(2); + + expect( + await GlideJson.objlen(client, key, { path: "$.." }), + ).toEqual([2, 3, 2]); + + expect( + await GlideJson.objlen(client, key, { path: ".." 
}), + ).toEqual(2); + + expect( + await GlideJson.objlen(client, key, { path: "$..b" }), + ).toEqual([3, null]); + + expect( + await GlideJson.objlen(client, key, { path: "..b" }), + ).toEqual(3); + + expect( + await GlideJson.objlen(client, Buffer.from(key), { + path: Buffer.from("..a"), + }), + ).toEqual(2); + + expect(await GlideJson.objlen(client, key)).toEqual(2); + + // path doesn't exist + expect( + await GlideJson.objlen(client, key, { + path: "$.non_existing_path", + }), + ).toEqual([]); + + await expect( + GlideJson.objlen(client, key, { + path: "non_existing_path", + }), + ).rejects.toThrow(RequestError); + + // Value at path isnt an object + expect( + await GlideJson.objlen(client, key, { + path: "$.non_existing_path", + }), + ).toEqual([]); + + await expect( + GlideJson.objlen(client, key, { path: ".a" }), + ).rejects.toThrow(RequestError); + + // Non-existing key + expect( + await GlideJson.objlen(client, "non_existing_key", { + path: "$", + }), + ).toBeNull(); + + expect( + await GlideJson.objlen(client, "non_existing_key", { + path: ".", }), - ).toEqual([6, 8, null]); + ).toBeNull(); + expect( - await GlideJson.strappend( + await GlideJson.set( client, key, - JSON.stringify("foo"), - { - path: "a", - }, + "$", + '{"a": 1, "b": 2, "c":3, "d":4}', + ), + ).toBe("OK"); + expect(await GlideJson.objlen(client, key)).toEqual(4); + }, + ); + + it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( + "json.objkeys tests", + async (protocol) => { + client = await GlideClusterClient.createClient( + getClientConfigurationOption( + cluster.getAddresses(), + protocol, + ), + ); + const key = uuidv4(); + const jsonValue = { + a: 1.0, + b: { a: { x: 1, y: 2 }, b: 2.5, c: true }, + }; + + // setup + expect( + await GlideJson.set( + client, + key, + "$", + JSON.stringify(jsonValue), ), - ).toBe(9); + ).toBe("OK"); + + expect( + await GlideJson.objkeys(client, key, { path: "$" }), + ).toEqual([["a", "b"]]); + + expect( + await GlideJson.objkeys(client, key, { + 
path: ".", + decoder: Decoder.Bytes, + }), + ).toEqual([Buffer.from("a"), Buffer.from("b")]); + + expect( + await GlideJson.objkeys(client, Buffer.from(key), { + path: Buffer.from("$.."), + }), + ).toEqual([ + ["a", "b"], + ["a", "b", "c"], + ["x", "y"], + ]); + + expect( + await GlideJson.objkeys(client, key, { path: ".." }), + ).toEqual(["a", "b"]); + + expect( + await GlideJson.objkeys(client, key, { path: "$..b" }), + ).toEqual([["a", "b", "c"], []]); + + expect( + await GlideJson.objkeys(client, key, { path: "..b" }), + ).toEqual(["a", "b", "c"]); + + // path doesn't exist + expect( + await GlideJson.objkeys(client, key, { + path: "$.non_existing_path", + }), + ).toEqual([]); expect( - await GlideJson.get(client, key, { path: "." }), - ).toEqual( - JSON.stringify({ - a: "foobarfoo", - nested: { a: "hellobar" }, - nested2: { a: 31 }, + await GlideJson.objkeys(client, key, { + path: "non_existing_path", }), - ); + ).toBeNull(); + // Value at path isnt an object expect( - await GlideJson.strappend( - client, - key, - JSON.stringify("bar"), - { - path: "$.nested", - }, - ), - ).toEqual([null]); + await GlideJson.objkeys(client, key, { path: "$.a" }), + ).toEqual([[]]); await expect( - GlideJson.strappend( - client, - key, - JSON.stringify("bar"), - { - path: ".nested", - }, - ), - ).rejects.toThrow(RequestError); - await expect( - GlideJson.strappend(client, key, JSON.stringify("bar")), + GlideJson.objkeys(client, key, { path: ".a" }), ).rejects.toThrow(RequestError); + // Non-existing key expect( - await GlideJson.strappend( - client, - key, - JSON.stringify("try"), - { - path: "$.non_existing_path", - }, - ), - ).toEqual([]); + await GlideJson.objkeys(client, "non_existing_key", { + path: "$", + }), + ).toBeNull(); - await expect( - GlideJson.strappend( - client, - key, - JSON.stringify("try"), - { - path: ".non_existing_path", - }, - ), - ).rejects.toThrow(RequestError); - await expect( - GlideJson.strappend( - client, - "non_existing_key", - 
JSON.stringify("try"), - ), - ).rejects.toThrow(RequestError); + expect( + await GlideJson.objkeys(client, "non_existing_key", { + path: ".", + }), + ).toBeNull(); }, ); }, @@ -1373,7 +2453,7 @@ describe("Server Module Tests", () => { } }); - it("FT.DROPINDEX test", async () => { + it("FT.DROPINDEX FT._LIST FT.LIST", async () => { client = await GlideClusterClient.createClient( getClientConfigurationOption( cluster.getAddresses(), @@ -1399,13 +2479,13 @@ describe("Server Module Tests", () => { ]), ).toEqual("OK"); - const before = await client.customCommand(["FT._LIST"]); + const before = await GlideFt.list(client); expect(before).toContain(index); // DROP it expect(await GlideFt.dropindex(client, index)).toEqual("OK"); - const after = await client.customCommand(["FT._LIST"]); + const after = await GlideFt.list(client); expect(after).not.toContain(index); // dropping the index again results in an error @@ -1418,6 +2498,324 @@ describe("Server Module Tests", () => { } }); + it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( + "FT.AGGREGATE ft.aggregate", + async (protocol) => { + client = await GlideClusterClient.createClient( + getClientConfigurationOption( + cluster.getAddresses(), + protocol, + ), + ); + + const isResp3 = protocol == ProtocolVersion.RESP3; + const prefixBicycles = "{bicycles}:"; + const indexBicycles = prefixBicycles + uuidv4(); + const prefixMovies = "{movies}:"; + const indexMovies = prefixMovies + uuidv4(); + + // FT.CREATE idx:bicycle ON JSON PREFIX 1 bicycle: SCHEMA $.model AS model TEXT $.description AS + // description TEXT $.price AS price NUMERIC $.condition AS condition TAG SEPARATOR , + expect( + await GlideFt.create( + client, + indexBicycles, + [ + { type: "TEXT", name: "$.model", alias: "model" }, + { + type: "TEXT", + name: "$.description", + alias: "description", + }, + { + type: "NUMERIC", + name: "$.price", + alias: "price", + }, + { + type: "TAG", + name: "$.condition", + alias: "condition", + separator: ",", + }, + ], + 
{ prefixes: [prefixBicycles], dataType: "JSON" }, + ), + ).toEqual("OK"); + + // TODO check JSON module loaded + expect( + await GlideJson.set( + client, + prefixBicycles + 0, + ".", + '{"brand": "Velorim", "model": "Jigger", "price": 270, "condition": "new"}', + ), + ).toEqual("OK"); + + expect( + await GlideJson.set( + client, + prefixBicycles + 1, + ".", + '{"brand": "Bicyk", "model": "Hillcraft", "price": 1200, "condition": "used"}', + ), + ).toEqual("OK"); + + expect( + await GlideJson.set( + client, + prefixBicycles + 2, + ".", + '{"brand": "Nord", "model": "Chook air 5", "price": 815, "condition": "used"}', + ), + ).toEqual("OK"); + + expect( + await GlideJson.set( + client, + prefixBicycles + 3, + ".", + '{"brand": "Eva", "model": "Eva 291", "price": 3400, "condition": "used"}', + ), + ).toEqual("OK"); + + expect( + await GlideJson.set( + client, + prefixBicycles + 4, + ".", + '{"brand": "Noka Bikes", "model": "Kahuna", "price": 3200, "condition": "used"}', + ), + ).toEqual("OK"); + + expect( + await GlideJson.set( + client, + prefixBicycles + 5, + ".", + '{"brand": "Breakout", "model": "XBN 2.1 Alloy", "price": 810, "condition": "new"}', + ), + ).toEqual("OK"); + + expect( + await GlideJson.set( + client, + prefixBicycles + 6, + ".", + '{"brand": "ScramBikes", "model": "WattBike", "price": 2300, "condition": "new"}', + ), + ).toEqual("OK"); + + expect( + await GlideJson.set( + client, + prefixBicycles + 7, + ".", + '{"brand": "Peaknetic", "model": "Secto", "price": 430, "condition": "new"}', + ), + ).toEqual("OK"); + + expect( + await GlideJson.set( + client, + prefixBicycles + 8, + ".", + '{"brand": "nHill", "model": "Summit", "price": 1200, "condition": "new"}', + ), + ).toEqual("OK"); + + expect( + await GlideJson.set( + client, + prefixBicycles + 9, + ".", + '{"model": "ThrillCycle", "brand": "BikeShind", "price": 815, "condition": "refurbished"}', + ), + ).toEqual("OK"); + + // let server digest the data and update index + await new Promise((resolve) 
=> + setTimeout(resolve, DATA_PROCESSING_TIMEOUT), + ); + + // FT.AGGREGATE idx:bicycle * LOAD 1 __key GROUPBY 1 @condition REDUCE COUNT 0 AS bicycles + let options: FtAggregateOptions = { + loadFields: ["__key"], + clauses: [ + { + type: "GROUPBY", + properties: ["@condition"], + reducers: [ + { + function: "COUNT", + args: [], + name: "bicycles", + }, + ], + }, + ], + }; + let aggreg = ( + await GlideFt.aggregate(client, indexBicycles, "*", options) + ) + .map(convertGlideRecordToRecord) + // elements (records in array) could be reordered + .sort((a, b) => + a["condition"]! > b["condition"]! ? 1 : -1, + ); + expect(aggreg).toEqual([ + { + condition: "new", + bicycles: isResp3 ? 5 : "5", + }, + { + condition: "refurbished", + bicycles: isResp3 ? 1 : "1", + }, + { + condition: "used", + bicycles: isResp3 ? 4 : "4", + }, + ]); + + // FT.CREATE idx:movie ON hash PREFIX 1 "movie:" SCHEMA title TEXT release_year NUMERIC + // rating NUMERIC genre TAG votes NUMERIC + expect( + await GlideFt.create( + client, + indexMovies, + [ + { type: "TEXT", name: "title" }, + { type: "NUMERIC", name: "release_year" }, + { type: "NUMERIC", name: "rating" }, + { type: "TAG", name: "genre" }, + { type: "NUMERIC", name: "votes" }, + ], + { prefixes: [prefixMovies], dataType: "HASH" }, + ), + ).toEqual("OK"); + + await client.hset(prefixMovies + 11002, { + title: "Star Wars: Episode V - The Empire Strikes Back", + release_year: "1980", + genre: "Action", + rating: "8.7", + votes: "1127635", + imdb_id: "tt0080684", + }); + + await client.hset(prefixMovies + 11003, { + title: "The Godfather", + release_year: "1972", + genre: "Drama", + rating: "9.2", + votes: "1563839", + imdb_id: "tt0068646", + }); + + await client.hset(prefixMovies + 11004, { + title: "Heat", + release_year: "1995", + genre: "Thriller", + rating: "8.2", + votes: "559490", + imdb_id: "tt0113277", + }); + + await client.hset(prefixMovies + 11005, { + title: "Star Wars: Episode VI - Return of the Jedi", + release_year: 
"1983", + genre: "Action", + rating: "8.3", + votes: "906260", + imdb_id: "tt0086190", + }); + + // let server digest the data and update index + await new Promise((resolve) => + setTimeout(resolve, DATA_PROCESSING_TIMEOUT), + ); + + // FT.AGGREGATE idx:movie * LOAD * APPLY ceil(@rating) as r_rating GROUPBY 1 @genre REDUCE + // COUNT 0 AS nb_of_movies REDUCE SUM 1 votes AS nb_of_votes REDUCE AVG 1 r_rating AS avg_rating + // SORTBY 4 @avg_rating DESC @nb_of_votes DESC + options = { + loadAll: true, + clauses: [ + { + type: "APPLY", + expression: "ceil(@rating)", + name: "r_rating", + }, + { + type: "GROUPBY", + properties: ["@genre"], + reducers: [ + { + function: "COUNT", + args: [], + name: "nb_of_movies", + }, + { + function: "SUM", + args: ["votes"], + name: "nb_of_votes", + }, + { + function: "AVG", + args: ["r_rating"], + name: "avg_rating", + }, + ], + }, + { + type: "SORTBY", + properties: [ + { + property: "@avg_rating", + order: SortOrder.DESC, + }, + { + property: "@nb_of_votes", + order: SortOrder.DESC, + }, + ], + }, + ], + }; + aggreg = ( + await GlideFt.aggregate(client, indexMovies, "*", options) + ) + .map(convertGlideRecordToRecord) + // elements (records in array) could be reordered + .sort((a, b) => (a["genre"]! > b["genre"]! ? 1 : -1)); + expect(aggreg).toEqual([ + { + genre: "Action", + nb_of_movies: isResp3 ? 2.0 : "2", + nb_of_votes: isResp3 ? 2033895.0 : "2033895", + avg_rating: isResp3 ? 9.0 : "9", + }, + { + genre: "Drama", + nb_of_movies: isResp3 ? 1.0 : "1", + nb_of_votes: isResp3 ? 1563839.0 : "1563839", + avg_rating: isResp3 ? 10.0 : "10", + }, + { + genre: "Thriller", + nb_of_movies: isResp3 ? 1.0 : "1", + nb_of_votes: isResp3 ? 559490.0 : "559490", + avg_rating: isResp3 ? 
9.0 : "9", + }, + ]); + + await GlideFt.dropindex(client, indexMovies); + await GlideFt.dropindex(client, indexBicycles); + }, + ); + it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( "FT.INFO ft.info", async (protocol) => { @@ -1491,7 +2889,7 @@ describe("Server Module Tests", () => { }, ); - it("FT.SEARCH binary test", async () => { + it("FT.EXPLAIN ft.explain FT.EXPLAINCLI ft.explaincli", async () => { client = await GlideClusterClient.createClient( getClientConfigurationOption( cluster.getAddresses(), @@ -1499,6 +2897,70 @@ describe("Server Module Tests", () => { ), ); + const index = uuidv4(); + expect( + await GlideFt.create(client, index, [ + { type: "NUMERIC", name: "price" }, + { type: "TEXT", name: "title" }, + ]), + ).toEqual("OK"); + + let explain = await GlideFt.explain( + client, + Buffer.from(index), + "@price:[0 10]", + ); + expect(explain).toContain("price"); + expect(explain).toContain("10"); + + explain = ( + (await GlideFt.explain(client, index, "@price:[0 10]", { + decoder: Decoder.Bytes, + })) as Buffer + ).toString(); + expect(explain).toContain("price"); + expect(explain).toContain("10"); + + explain = await GlideFt.explain(client, index, "*"); + expect(explain).toContain("*"); + + let explaincli = ( + await GlideFt.explaincli( + client, + Buffer.from(index), + "@price:[0 10]", + ) + ).map((s) => (s as string).trim()); + expect(explaincli).toContain("price"); + expect(explaincli).toContain("0"); + expect(explaincli).toContain("10"); + + explaincli = ( + await GlideFt.explaincli(client, index, "@price:[0 10]", { + decoder: Decoder.Bytes, + }) + ).map((s) => (s as Buffer).toString().trim()); + expect(explaincli).toContain("price"); + expect(explaincli).toContain("0"); + expect(explaincli).toContain("10"); + + expect(await GlideFt.dropindex(client, index)).toEqual("OK"); + // querying a missing index + await expect(GlideFt.explain(client, index, "*")).rejects.toThrow( + "Index not found", + ); + await expect( + GlideFt.explaincli(client, 
index, "*"), + ).rejects.toThrow("Index not found"); + }); + + it("FT.SEARCH binary test", async () => { + client = await GlideClusterClient.createClient( + getClientConfigurationOption( + cluster.getAddresses(), + ProtocolVersion.RESP3, + ), + ); const prefix = "{" + uuidv4() + "}:"; const index = prefix + "index"; @@ -1694,5 +3156,84 @@ describe("Server Module Tests", () => { ]; expect(stringResult).toEqual(expectedStringResult); }); + + it("FT.ALIASADD, FT.ALIASUPDATE and FT.ALIASDEL test", async () => { + client = await GlideClusterClient.createClient( + getClientConfigurationOption( + cluster.getAddresses(), + ProtocolVersion.RESP3, + ), + ); + const index = uuidv4(); + const alias = uuidv4() + "-alias"; + + // Create an index. + expect( + await GlideFt.create(client, index, [ + { type: "NUMERIC", name: "published_at" }, + { type: "TAG", name: "category" }, + ]), + ).toEqual("OK"); + // Check if the index created successfully. + expect(await client.customCommand(["FT._LIST"])).toContain(index); + + // Add an alias to the index. + expect(await GlideFt.aliasadd(client, index, alias)).toEqual("OK"); + + const newIndex = uuidv4(); + const newAlias = uuidv4(); + + // Create a second index. + expect( + await GlideFt.create(client, newIndex, [ + { type: "NUMERIC", name: "published_at" }, + { type: "TAG", name: "category" }, + ]), + ).toEqual("OK"); + // Check if the second index created successfully. + expect(await client.customCommand(["FT._LIST"])).toContain( + newIndex, + ); + + // Add an alias to second index and also test addalias for bytes type input. + expect( + await GlideFt.aliasadd( + client, + Buffer.from(newIndex), + Buffer.from(newAlias), + ), + ).toEqual("OK"); + + // Test if updating an already existing alias to point to an existing index returns "OK". + expect(await GlideFt.aliasupdate(client, newAlias, index)).toEqual( + "OK", + ); + // Test alias update for byte type input. 
+ expect( + await GlideFt.aliasupdate( + client, + Buffer.from(alias), + Buffer.from(newIndex), + ), + ).toEqual("OK"); + + // Test if an existing alias is deleted successfully. + expect(await GlideFt.aliasdel(client, alias)).toEqual("OK"); + + // Test if an existing alias is deleted successfully for bytes type input. + expect( + await GlideFt.aliasdel(client, Buffer.from(newAlias)), + ).toEqual("OK"); + + // Drop both indexes. + expect(await GlideFt.dropindex(client, index)).toEqual("OK"); + expect(await client.customCommand(["FT._LIST"])).not.toContain( + index, + ); + expect(await GlideFt.dropindex(client, newIndex)).toEqual("OK"); + expect(await client.customCommand(["FT._LIST"])).not.toContain( + newIndex, + ); + }); }); }); diff --git a/node/tests/SharedTests.ts b/node/tests/SharedTests.ts index 97a0cce645..661f0a8b7a 100644 --- a/node/tests/SharedTests.ts +++ b/node/tests/SharedTests.ts @@ -20,7 +20,6 @@ import { BitOverflowControl, BitmapIndexType, BitwiseOperation, - ClosingError, ClusterTransaction, ConditionalChange, Decoder, @@ -127,13 +126,9 @@ export function runBaseTests(config: { await runTest(async (client: BaseClient) => { client.close(); - try { - expect(await client.set("foo", "bar")).toThrow(); - } catch (e) { - expect((e as ClosingError).message).toMatch( - "Unable to execute requests; the client is closed. Please create a new client.", - ); - } + await expect(client.set("foo", "bar")).rejects.toThrow( + "Unable to execute requests; the client is closed. 
Please create a new client.", + ); }, protocol); }, config.timeout, @@ -300,14 +295,10 @@ export function runBaseTests(config: { if (conf_file.length > 0) { expect(await client.configRewrite()).toEqual("OK"); } else { - try { - /// We expect Valkey to return an error since the test cluster doesn't use redis.conf file - expect(await client.configRewrite()).toThrow(); - } catch (e) { - expect((e as Error).message).toMatch( - "The server is running without a config file", - ); - } + /// We expect Valkey to return an error since the test cluster doesn't use redis.conf file + await expect(client.configRewrite()).rejects.toThrow( + "The server is running without a config file", + ); } }, protocol); }, @@ -505,29 +496,16 @@ export function runBaseTests(config: { const key = uuidv4(); expect(await client.set(key, "foo")).toEqual("OK"); - try { - expect(await client.incr(key)).toThrow(); - } catch (e) { - expect((e as Error).message).toMatch( - "value is not an integer", - ); - } - - try { - expect(await client.incrBy(key, 1)).toThrow(); - } catch (e) { - expect((e as Error).message).toMatch( - "value is not an integer", - ); - } + await expect(client.incr(key)).rejects.toThrow( + "value is not an integer", + ); - try { - expect(await client.incrByFloat(key, 1.5)).toThrow(); - } catch (e) { - expect((e as Error).message).toMatch( - "value is not a valid float", - ); - } + await expect(client.incrBy(key, 1)).rejects.toThrow( + "value is not an integer", + ); + await expect(client.incrByFloat(key, 1.5)).rejects.toThrow( + "value is not a valid float", + ); }, protocol); }, config.timeout, @@ -617,21 +595,13 @@ export function runBaseTests(config: { const key = uuidv4(); expect(await client.set(key, "foo")).toEqual("OK"); - try { - expect(await client.decr(key)).toThrow(); - } catch (e) { - expect((e as Error).message).toMatch( - "value is not an integer", - ); - } + await expect(client.decr(key)).rejects.toThrow( + "value is not an integer", + ); - try { - expect(await 
client.decrBy(key, 3)).toThrow(); - } catch (e) { - expect((e as Error).message).toMatch( - "value is not an integer", - ); - } + await expect(client.decrBy(key, 3)).rejects.toThrow( + "value is not an integer", + ); }, protocol); }, config.timeout, @@ -1929,23 +1899,12 @@ export function runBaseTests(config: { }; expect(await client.hset(key, fieldValueMap)).toEqual(1); - try { - expect(await client.hincrBy(key, field, 2)).toThrow(); - } catch (e) { - expect((e as Error).message).toMatch( - "hash value is not an integer", - ); - } - - try { - expect( - await client.hincrByFloat(key, field, 1.5), - ).toThrow(); - } catch (e) { - expect((e as Error).message).toMatch( - "hash value is not a float", - ); - } + await expect(client.hincrBy(key, field, 2)).rejects.toThrow( + "hash value is not an integer", + ); + await expect( + client.hincrByFloat(key, field, 1.5), + ).rejects.toThrow("hash value is not a float"); }, protocol); }, config.timeout, @@ -2198,29 +2157,15 @@ export function runBaseTests(config: { const key = uuidv4(); expect(await client.set(key, "foo")).toEqual("OK"); - try { - expect(await client.lpush(key, ["bar"])).toThrow(); - } catch (e) { - expect((e as Error).message).toMatch( - "Operation against a key holding the wrong kind of value", - ); - } - - try { - expect(await client.lpop(key)).toThrow(); - } catch (e) { - expect((e as Error).message).toMatch( - "Operation against a key holding the wrong kind of value", - ); - } - - try { - expect(await client.lrange(key, 0, -1)).toThrow(); - } catch (e) { - expect((e as Error).message).toMatch( - "Operation against a key holding the wrong kind of value", - ); - } + await expect(client.lpush(key, ["bar"])).rejects.toThrow( + "Operation against a key holding the wrong kind of value", + ); + await expect(client.lpop(key)).rejects.toThrow( + "Operation against a key holding the wrong kind of value", + ); + await expect(client.lrange(key, 0, -1)).rejects.toThrow( + "Operation against a key holding the wrong 
kind of value", + ); }, protocol); }, config.timeout, @@ -2292,13 +2237,9 @@ export function runBaseTests(config: { expect(await client.set(key2, "foo")).toEqual("OK"); - try { - expect(await client.llen(key2)).toThrow(); - } catch (e) { - expect((e as Error).message).toMatch( - "Operation against a key holding the wrong kind of value", - ); - } + await expect(client.llen(key2)).rejects.toThrow( + "Operation against a key holding the wrong kind of value", + ); }, protocol); }, config.timeout, @@ -2660,13 +2601,9 @@ export function runBaseTests(config: { expect(await client.set(key, "foo")).toEqual("OK"); - try { - expect(await client.ltrim(key, 0, 1)).toThrow(); - } catch (e) { - expect((e as Error).message).toMatch( - "Operation against a key holding the wrong kind of value", - ); - } + await expect(client.ltrim(key, 0, 1)).rejects.toThrow( + "Operation against a key holding the wrong kind of value", + ); //test for binary key as input to the command const key2 = uuidv4(); @@ -2774,21 +2711,12 @@ export function runBaseTests(config: { const key = uuidv4(); expect(await client.set(key, "foo")).toEqual("OK"); - try { - expect(await client.rpush(key, ["bar"])).toThrow(); - } catch (e) { - expect((e as Error).message).toMatch( - "Operation against a key holding the wrong kind of value", - ); - } - - try { - expect(await client.rpop(key)).toThrow(); - } catch (e) { - expect((e as Error).message).toMatch( - "Operation against a key holding the wrong kind of value", - ); - } + await expect(client.rpush(key, ["bar"])).rejects.toThrow( + "Operation against a key holding the wrong kind of value", + ); + await expect(client.rpop(key)).rejects.toThrow( + "Operation against a key holding the wrong kind of value", + ); }, protocol); }, config.timeout, @@ -2974,37 +2902,18 @@ export function runBaseTests(config: { const key = uuidv4(); expect(await client.set(key, "foo")).toEqual("OK"); - try { - expect(await client.sadd(key, ["bar"])).toThrow(); - } catch (e) { - expect((e as 
Error).message).toMatch( - "Operation against a key holding the wrong kind of value", - ); - } - - try { - expect(await client.srem(key, ["bar"])).toThrow(); - } catch (e) { - expect((e as Error).message).toMatch( - "Operation against a key holding the wrong kind of value", - ); - } - - try { - expect(await client.scard(key)).toThrow(); - } catch (e) { - expect((e as Error).message).toMatch( - "Operation against a key holding the wrong kind of value", - ); - } - - try { - expect(await client.smembers(key)).toThrow(); - } catch (e) { - expect((e as Error).message).toMatch( - "Operation against a key holding the wrong kind of value", - ); - } + await expect(client.sadd(key, ["bar"])).rejects.toThrow( + "Operation against a key holding the wrong kind of value", + ); + await expect(client.srem(key, ["bar"])).rejects.toThrow( + "Operation against a key holding the wrong kind of value", + ); + await expect(client.scard(key)).rejects.toThrow( + "Operation against a key holding the wrong kind of value", + ); + await expect(client.smembers(key)).rejects.toThrow( + "Operation against a key holding the wrong kind of value", + ); }, protocol); }, config.timeout, @@ -3036,13 +2945,9 @@ export function runBaseTests(config: { ).toEqual(new Set([Buffer.from("c"), Buffer.from("d")])); // invalid argument - key list must not be empty - try { - expect(await client.sinter([])).toThrow(); - } catch (e) { - expect((e as Error).message).toMatch( - "ResponseError: wrong number of arguments", - ); - } + await expect(client.sinter([])).rejects.toThrow( + "wrong number of arguments", + ); // non-existing key returns empty set expect(await client.sinter([key1, non_existing_key])).toEqual( @@ -3052,13 +2957,9 @@ export function runBaseTests(config: { // non-set key expect(await client.set(key2, "value")).toEqual("OK"); - try { - expect(await client.sinter([key2])).toThrow(); - } catch (e) { - expect((e as Error).message).toMatch( - "Operation against a key holding the wrong kind of value", - 
); - } + await expect(client.sinter([key2])).rejects.toThrow( + "Operation against a key holding the wrong kind of value", + ); }, protocol); }, config.timeout, @@ -4232,6 +4133,18 @@ export function runBaseTests(config: { config.timeout, ); + it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( + "script kill killable test_%p", + async (protocol) => { + await runTest(async (client: BaseClient) => { + await expect(client.scriptKill()).rejects.toThrow( + "No scripts in execution right now", + ); + }, protocol); + }, + config.timeout, + ); + it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( `zadd and zaddIncr with NX XX test_%p`, async (protocol) => { @@ -6692,19 +6605,6 @@ export function runBaseTests(config: { // key exists, but it is not a list await client.set("foo", "bar"); await expect(client.brpop(["foo"], 0.1)).rejects.toThrow(); - - // Same-slot requirement - if (client instanceof GlideClusterClient) { - try { - expect( - await client.brpop(["abc", "zxy", "lkn"], 0.1), - ).toThrow(); - } catch (e) { - expect((e as Error).message.toLowerCase()).toMatch( - "crossslot", - ); - } - } }, protocol); }, config.timeout, @@ -6735,19 +6635,6 @@ export function runBaseTests(config: { // key exists, but it is not a list await client.set("foo", "bar"); await expect(client.blpop(["foo"], 0.1)).rejects.toThrow(); - - // Same-slot requirement - if (client instanceof GlideClusterClient) { - try { - expect( - await client.blpop(["abc", "zxy", "lkn"], 0.1), - ).toThrow(); - } catch (e) { - expect((e as Error).message.toLowerCase()).toMatch( - "crossslot", - ); - } - } }, protocol); }, config.timeout, @@ -7736,11 +7623,9 @@ export function runBaseTests(config: { const key3 = `{key}-3-${uuidv4()}`; // renamenx missing key - try { - expect(await client.renamenx(key1, key2)).toThrow(); - } catch (e) { - expect((e as Error).message).toMatch("no such key"); - } + await expect(client.renamenx(key1, key2)).rejects.toThrow( + "no such key", + ); // renamenx a string await 
client.set(key1, "key1"); @@ -7969,13 +7854,9 @@ export function runBaseTests(config: { expect(await client.pfcount([key3])).toEqual(0); // invalid argument - key list must not be empty - try { - expect(await client.pfcount([])).toThrow(); - } catch (e) { - expect((e as Error).message).toMatch( - "ResponseError: wrong number of arguments", - ); - } + await expect(client.pfcount([])).rejects.toThrow( + "ResponseError: wrong number of arguments", + ); // key exists, but it is not a HyperLogLog expect(await client.set(stringKey, "value")).toEqual("OK"); diff --git a/node/tests/TestUtilities.ts b/node/tests/TestUtilities.ts index 85cfb740fb..b47af160e5 100644 --- a/node/tests/TestUtilities.ts +++ b/node/tests/TestUtilities.ts @@ -4,7 +4,6 @@ import { expect } from "@jest/globals"; import { exec } from "child_process"; -import parseArgs from "minimist"; import { gte } from "semver"; import { v4 as uuidv4 } from "uuid"; import { @@ -329,40 +328,6 @@ export function createLongRunningLuaScript( return script.replaceAll("$timeout", timeout.toString()); } -export async function waitForScriptNotBusy( - client: GlideClusterClient | GlideClient, -) { - // If function wasn't killed, and it didn't time out - it blocks the server and cause rest test to fail. - let isBusy = true; - - do { - try { - await client.scriptKill(); - } catch (err) { - // should throw `notbusy` error, because the function should be killed before - if ((err as Error).message.toLowerCase().includes("notbusy")) { - isBusy = false; - } - } - } while (isBusy); -} - -/** - * Parses the command-line arguments passed to the Node.js process. - * - * @returns Parsed command-line arguments. 
- * - * @example - * ```typescript - * // Command: node script.js --name="John Doe" --age=30 - * const args = parseCommandLineArgs(); - * // args = { name: 'John Doe', age: 30 } - * ``` - */ -export function parseCommandLineArgs() { - return parseArgs(process.argv.slice(2)); -} - export async function testTeardown( cluster_mode: boolean, option: BaseClientConfiguration, @@ -386,7 +351,7 @@ export const getClientConfigurationOption = ( port, })), protocol, - useTLS: parseCommandLineArgs()["tls"] == "true", + useTLS: global.TLS ?? false, requestTimeout: 1000, ...configOverrides, }; diff --git a/node/tests/setup.js b/node/tests/setup.js deleted file mode 100644 index d9b8d0b74b..0000000000 --- a/node/tests/setup.js +++ /dev/null @@ -1,7 +0,0 @@ -import { beforeAll } from "@jest/globals"; -import { Logger } from ".."; - -beforeAll(() => { - Logger.init("off"); - Logger.setLoggerConfig("off"); -}); diff --git a/node/tests/setup.ts b/node/tests/setup.ts new file mode 100644 index 0000000000..337d5430d2 --- /dev/null +++ b/node/tests/setup.ts @@ -0,0 +1,23 @@ +/* eslint-disable no-var */ +import { beforeAll } from "@jest/globals"; +import minimist from "minimist"; +import { Logger } from "../build-ts"; + +beforeAll(() => { + Logger.init("error", "log.log"); + // Logger.setLoggerConfig("off"); +}); + +declare global { + var CLI_ARGS: Record; + var CLUSTER_ENDPOINTS: string; + var STAND_ALONE_ENDPOINT: string; + var TLS: boolean; +} + +const args = minimist(process.argv.slice(2)); +// Make the arguments available globally +global.CLI_ARGS = args; +global.CLUSTER_ENDPOINTS = args["cluster-endpoints"] as string; +global.STAND_ALONE_ENDPOINT = args["standalone-endpoints"] as string; +global.TLS = !!args.tls; diff --git a/node/tests/tsconfig.json b/node/tests/tsconfig.json index f7b57f9a00..ab48e3f902 100644 --- a/node/tests/tsconfig.json +++ b/node/tests/tsconfig.json @@ -3,5 +3,5 @@ "compilerOptions": { "rootDir": "../../" }, - "include": ["*.ts", "./*.test.ts"] + "include": 
["*.ts", "./*.test.ts", "setup.ts"] } diff --git a/python/python/glide/__init__.py b/python/python/glide/__init__.py index 4db35d0e30..7d92c38b63 100644 --- a/python/python/glide/__init__.py +++ b/python/python/glide/__init__.py @@ -63,8 +63,8 @@ FtProfileOptions, ) from glide.async_commands.server_modules.ft_options.ft_search_options import ( - FtSeachOptions, FtSearchLimit, + FtSearchOptions, ReturnField, ) from glide.async_commands.server_modules.json import ( @@ -305,7 +305,7 @@ "VectorType", "FtSearchLimit", "ReturnField", - "FtSeachOptions", + "FtSearchOptions", "FtAggregateApply", "FtAggregateFilter", "FtAggregateClause", diff --git a/python/python/glide/async_commands/core.py b/python/python/glide/async_commands/core.py index 7acb44ca60..edb5178c0a 100644 --- a/python/python/glide/async_commands/core.py +++ b/python/python/glide/async_commands/core.py @@ -613,7 +613,13 @@ async def delete(self, keys: List[TEncodable]) -> int: See https://valkey.io/commands/del/ for details. Note: - When in cluster mode, the command may route to multiple nodes when `keys` map to different hash slots. + In cluster mode, if keys in `keys` map to different hash slots, + the command will be split across these slots and executed separately for each. + This means the command is atomic only at the slot level. If one or more slot-specific + requests fail, the entire call will return the first encountered error, even + though some requests may have succeeded while others did not. + If this behavior impacts your application logic, consider splitting the + request into sub-requests per slot to ensure atomicity. Args: keys (List[TEncodable]): A list of keys to be deleted from the database. @@ -730,7 +736,13 @@ async def mset(self, key_value_map: Mapping[TEncodable, TEncodable]) -> TOK: See https://valkey.io/commands/mset/ for more details. Note: - When in cluster mode, the command may route to multiple nodes when keys in `key_value_map` map to different hash slots. 
+ In cluster mode, if keys in `key_value_map` map to different hash slots, + the command will be split across these slots and executed separately for each. + This means the command is atomic only at the slot level. If one or more slot-specific + requests fail, the entire call will return the first encountered error, even + though some requests may have succeeded while others did not. + If this behavior impacts your application logic, consider splitting the + request into sub-requests per slot to ensure atomicity. Args: key_value_map (Mapping[TEncodable, TEncodable]): A map of key value pairs. @@ -783,8 +795,13 @@ async def mget(self, keys: List[TEncodable]) -> List[Optional[bytes]]: See https://valkey.io/commands/mget/ for more details. Note: - When in cluster mode, the command may route to multiple nodes when `keys` map to different hash slots. - + In cluster mode, if keys in `keys` map to different hash slots, + the command will be split across these slots and executed separately for each. + This means the command is atomic only at the slot level. If one or more slot-specific + requests fail, the entire call will return the first encountered error, even + though some requests may have succeeded while others did not. + If this behavior impacts your application logic, consider splitting the + request into sub-requests per slot to ensure atomicity. Args: keys (List[TEncodable]): A list of keys to retrieve values for. @@ -850,7 +867,14 @@ async def touch(self, keys: List[TEncodable]) -> int: See https://valkey.io/commands/touch/ for details. Note: - When in cluster mode, the command may route to multiple nodes when `keys` map to different hash slots. + In cluster mode, if keys in `key_value_map` map to different hash slots, + the command will be split across these slots and executed separately for each. + This means the command is atomic only at the slot level. 
If one or more slot-specific + requests fail, the entire call will return the first encountered error, even + though some requests may have succeeded while others did not. + If this behavior impacts your application logic, consider splitting the + request into sub-requests per slot to ensure atomicity. Args: + keys (List[TEncodable]): The list of keys to unlink. Args: keys (List[TEncodable]): The keys to update last access time. @@ -2303,7 +2327,13 @@ async def exists(self, keys: List[TEncodable]) -> int: See https://valkey.io/commands/exists/ for more details. Note: - When in cluster mode, the command may route to multiple nodes when `keys` map to different hash slots. + In cluster mode, if keys in `keys` map to different hash slots, + the command will be split across these slots and executed separately for each. + This means the command is atomic only at the slot level. If one or more slot-specific + requests fail, the entire call will return the first encountered error, even + though some requests may have succeeded while others did not. + If this behavior impacts your application logic, consider splitting the + request into sub-requests per slot to ensure atomicity. Args: keys (List[TEncodable]): The list of keys to check. @@ -2327,7 +2357,13 @@ async def unlink(self, keys: List[TEncodable]) -> int: See https://valkey.io/commands/unlink/ for more details. Note: - When in cluster mode, the command may route to multiple nodes when `keys` map to different hash slots. + In cluster mode, if keys in `key_value_map` map to different hash slots, + the command will be split across these slots and executed separately for each. + This means the command is atomic only at the slot level. If one or more slot-specific + requests fail, the entire call will return the first encountered error, even + though some requests may have succeeded while others did not. 
+ If this behavior impacts your application logic, consider splitting the + request into sub-requests per slot to ensure atomicity. Args: keys (List[TEncodable]): The list of keys to unlink. @@ -6360,7 +6396,13 @@ async def watch(self, keys: List[TEncodable]) -> TOK: See https://valkey.io/commands/watch for more details. Note: - When in cluster mode, the command may route to multiple nodes when `keys` map to different hash slots. + In cluster mode, if keys in `key_value_map` map to different hash slots, + the command will be split across these slots and executed separately for each. + This means the command is atomic only at the slot level. If one or more slot-specific + requests fail, the entire call will return the first encountered error, even + though some requests may have succeeded while others did not. + If this behavior impacts your application logic, consider splitting the + request into sub-requests per slot to ensure atomicity. Args: keys (List[TEncodable]): The keys to watch. diff --git a/python/python/glide/async_commands/server_modules/ft.py b/python/python/glide/async_commands/server_modules/ft.py index c8a757a979..575f64cb56 100644 --- a/python/python/glide/async_commands/server_modules/ft.py +++ b/python/python/glide/async_commands/server_modules/ft.py @@ -20,7 +20,7 @@ FtProfileOptions, ) from glide.async_commands.server_modules.ft_options.ft_search_options import ( - FtSeachOptions, + FtSearchOptions, ) from glide.constants import ( TOK, @@ -35,7 +35,7 @@ async def create( client: TGlideClient, - indexName: TEncodable, + index_name: TEncodable, schema: List[Field], options: Optional[FtCreateOptions] = None, ) -> TOK: @@ -44,9 +44,9 @@ async def create( Args: client (TGlideClient): The client to execute the command. - indexName (TEncodable): The index name. + index_name (TEncodable): The index name. schema (List[Field]): Fields to populate into the index. Equivalent to `SCHEMA` block in the module API. 
- options (Optional[FtCreateOptions]): Optional arguments for the FT.CREATE command. See `FtCreateOptions`. + options (Optional[FtCreateOptions]): Optional arguments for the FT.CREATE command. Returns: TOK: A simple "OK" response. @@ -55,26 +55,26 @@ async def create( >>> from glide import ft >>> schema: List[Field] = [TextField("title")] >>> prefixes: List[str] = ["blog:post:"] - >>> result = await ft.create(glide_client, "my_idx1", schema, FtCreateOptions(DataType.HASH, prefixes)) + >>> await ft.create(glide_client, "my_idx1", schema, FtCreateOptions(DataType.HASH, prefixes)) 'OK' # Indicates successful creation of index named 'idx' """ - args: List[TEncodable] = [CommandNames.FT_CREATE, indexName] + args: List[TEncodable] = [CommandNames.FT_CREATE, index_name] if options: - args.extend(options.toArgs()) + args.extend(options.to_args()) if schema: args.append(FtCreateKeywords.SCHEMA) for field in schema: - args.extend(field.toArgs()) + args.extend(field.to_args()) return cast(TOK, await client.custom_command(args)) -async def dropindex(client: TGlideClient, indexName: TEncodable) -> TOK: +async def dropindex(client: TGlideClient, index_name: TEncodable) -> TOK: """ Drops an index. The index definition and associated content are deleted. Keys are unaffected. Args: client (TGlideClient): The client to execute the command. - indexName (TEncodable): The index name for the index to be dropped. + index_name (TEncodable): The index name for the index to be dropped. Returns: TOK: A simple "OK" response. @@ -82,28 +82,46 @@ async def dropindex(client: TGlideClient, indexName: TEncodable) -> TOK: Examples: For the following example to work, an index named 'idx' must be already created. If not created, you will get an error. 
>>> from glide import ft - >>> indexName = "idx" - >>> result = await ft.dropindex(glide_client, indexName) + >>> index_name = "idx" + >>> await ft.dropindex(glide_client, index_name) 'OK' # Indicates successful deletion/dropping of index named 'idx' """ - args: List[TEncodable] = [CommandNames.FT_DROPINDEX, indexName] + args: List[TEncodable] = [CommandNames.FT_DROPINDEX, index_name] return cast(TOK, await client.custom_command(args)) +async def list(client: TGlideClient) -> List[TEncodable]: + """ + Lists all indexes. + + Args: + client (TGlideClient): The client to execute the command. + + Returns: + List[TEncodable]: An array of index names. + + Examples: + >>> from glide import ft + >>> await ft.list(glide_client) + [b"index1", b"index2"] + """ + return cast(List[TEncodable], await client.custom_command([CommandNames.FT_LIST])) + + async def search( client: TGlideClient, - indexName: TEncodable, + index_name: TEncodable, query: TEncodable, - options: Optional[FtSeachOptions], + options: Optional[FtSearchOptions], ) -> FtSearchResponse: """ Uses the provided query expression to locate keys within an index. Once located, the count and/or the content of indexed fields within those keys can be returned. Args: client (TGlideClient): The client to execute the command. - indexName (TEncodable): The index name to search into. + index_name (TEncodable): The index name to search into. query (TEncodable): The text query to search. - options (Optional[FtSeachOptions]): The search options. See `FtSearchOptions`. + options (Optional[FtSearchOptions]): The search options. Returns: FtSearchResponse: A two element array, where first element is count of documents in result set, and the second element, which has the format Mapping[TEncodable, Mapping[TEncodable, TEncodable]] is a mapping between document names and map of their attributes. 
@@ -115,17 +133,17 @@ async def search( - A key named {json:}1 with value {"a":1, "b":2} >>> from glide import ft - >>> result = await ft.search(glide_client, "idx", "*", options=FtSeachOptions(return_fields=[ReturnField(field_identifier="first"), ReturnField(field_identifier="second")])) + >>> await ft.search(glide_client, "idx", "*", options=FtSearchOptions(return_fields=[ReturnField(field_identifier="first"), ReturnField(field_identifier="second")])) [1, { b'json:1': { b'first': b'42', b'second': b'33' } }] # The first element, 1 is the number of keys returned in the search result. The second element is a map of data queried per key. """ - args: List[TEncodable] = [CommandNames.FT_SEARCH, indexName, query] + args: List[TEncodable] = [CommandNames.FT_SEARCH, index_name, query] if options: - args.extend(options.toArgs()) + args.extend(options.to_args()) return cast(FtSearchResponse, await client.custom_command(args)) async def aliasadd( - client: TGlideClient, alias: TEncodable, indexName: TEncodable + client: TGlideClient, alias: TEncodable, index_name: TEncodable ) -> TOK: """ Adds an alias for an index. The new alias name can be used anywhere that an index name is required. @@ -133,17 +151,17 @@ async def aliasadd( Args: client (TGlideClient): The client to execute the command. alias (TEncodable): The alias to be added to an index. - indexName (TEncodable): The index name for which the alias has to be added. + index_name (TEncodable): The index name for which the alias has to be added. Returns: TOK: A simple "OK" response. Examples: >>> from glide import ft - >>> result = await ft.aliasadd(glide_client, "myalias", "myindex") + >>> await ft.aliasadd(glide_client, "myalias", "myindex") 'OK' # Indicates the successful addition of the alias named "myalias" for the index.
""" - args: List[TEncodable] = [CommandNames.FT_ALIASADD, alias, indexName] + args: List[TEncodable] = [CommandNames.FT_ALIASADD, alias, index_name] return cast(TOK, await client.custom_command(args)) @@ -160,7 +178,7 @@ async def aliasdel(client: TGlideClient, alias: TEncodable) -> TOK: Examples: >>> from glide import ft - >>> result = await ft.aliasdel(glide_client, "myalias") + >>> await ft.aliasdel(glide_client, "myalias") 'OK' # Indicates the successful deletion of the alias named "myalias" """ args: List[TEncodable] = [CommandNames.FT_ALIASDEL, alias] @@ -168,7 +186,7 @@ async def aliasdel(client: TGlideClient, alias: TEncodable) -> TOK: async def aliasupdate( - client: TGlideClient, alias: TEncodable, indexName: TEncodable + client: TGlideClient, alias: TEncodable, index_name: TEncodable ) -> TOK: """ Updates an existing alias to point to a different physical index. This command only affects future references to the alias. @@ -176,35 +194,35 @@ async def aliasupdate( Args: client (TGlideClient): The client to execute the command. alias (TEncodable): The alias name. This alias will now be pointed to a different index. - indexName (TEncodable): The index name for which an existing alias has to updated. + index_name (TEncodable): The index name for which an existing alias has to updated. Returns: TOK: A simple "OK" response. 
Examples: >>> from glide import ft - >>> result = await ft.aliasupdate(glide_client, "myalias", "myindex") + >>> await ft.aliasupdate(glide_client, "myalias", "myindex") 'OK' # Indicates the successful update of the alias to point to the index named "myindex" """ - args: List[TEncodable] = [CommandNames.FT_ALIASUPDATE, alias, indexName] + args: List[TEncodable] = [CommandNames.FT_ALIASUPDATE, alias, index_name] return cast(TOK, await client.custom_command(args)) -async def info(client: TGlideClient, indexName: TEncodable) -> FtInfoResponse: +async def info(client: TGlideClient, index_name: TEncodable) -> FtInfoResponse: """ Returns information about a given index. Args: client (TGlideClient): The client to execute the command. - indexName (TEncodable): The index name for which the information has to be returned. + index_name (TEncodable): The index name for which the information has to be returned. Returns: - FtInfoResponse: Nested maps with info about the index. See example for more details. See `FtInfoResponse`. + FtInfoResponse: Nested maps with info about the index. See example for more details. Examples: An index with name 'myIndex', 1 text field and 1 vector field is already created for gettting the output of this example. >>> from glide import ft - >>> result = await ft.info(glide_client, "myIndex") + >>> await ft.info(glide_client, "myIndex") [ b'index_name', b'myIndex', @@ -238,19 +256,19 @@ async def info(client: TGlideClient, indexName: TEncodable) -> FtInfoResponse: b'index_degradation_percentage', 0 ] """ - args: List[TEncodable] = [CommandNames.FT_INFO, indexName] + args: List[TEncodable] = [CommandNames.FT_INFO, index_name] return cast(FtInfoResponse, await client.custom_command(args)) async def explain( - client: TGlideClient, indexName: TEncodable, query: TEncodable + client: TGlideClient, index_name: TEncodable, query: TEncodable ) -> TEncodable: """ Parse a query and return information about how that query was parsed. 
Args: client (TGlideClient): The client to execute the command. - indexName (TEncodable): The index name for which the query is written. + index_name (TEncodable): The index name for which the query is written. query (TEncodable): The search query, same as the query passed as an argument to FT.SEARCH. Returns: @@ -258,22 +276,22 @@ async def explain( Examples: >>> from glide import ft - >>> result = await ft.explain(glide_client, indexName="myIndex", query="@price:[0 10]") + >>> await ft.explain(glide_client, index_name="myIndex", query="@price:[0 10]") b'Field {\n price\n 0\n 10\n}\n' # Parsed results. """ - args: List[TEncodable] = [CommandNames.FT_EXPLAIN, indexName, query] + args: List[TEncodable] = [CommandNames.FT_EXPLAIN, index_name, query] return cast(TEncodable, await client.custom_command(args)) async def explaincli( - client: TGlideClient, indexName: TEncodable, query: TEncodable + client: TGlideClient, index_name: TEncodable, query: TEncodable ) -> List[TEncodable]: """ Same as the FT.EXPLAIN command except that the results are displayed in a different format. More useful with cli. Args: client (TGlideClient): The client to execute the command. - indexName (TEncodable): The index name for which the query is written. + index_name (TEncodable): The index name for which the query is written. query (TEncodable): The search query, same as the query passed as an argument to FT.SEARCH. Returns: @@ -281,16 +299,16 @@ async def explaincli( Examples: >>> from glide import ft - >>> result = await ft.explaincli(glide_client, indexName="myIndex", query="@price:[0 10]") + >>> await ft.explaincli(glide_client, index_name="myIndex", query="@price:[0 10]") [b'Field {', b' price', b' 0', b' 10', b'}', b''] # Parsed results.
""" - args: List[TEncodable] = [CommandNames.FT_EXPLAINCLI, indexName, query] + args: List[TEncodable] = [CommandNames.FT_EXPLAINCLI, index_name, query] return cast(List[TEncodable], await client.custom_command(args)) async def aggregate( client: TGlideClient, - indexName: TEncodable, + index_name: TEncodable, query: TEncodable, options: Optional[FtAggregateOptions], ) -> FtAggregateResponse: @@ -299,7 +317,7 @@ async def aggregate( Args: client (TGlideClient): The client to execute the command. - indexName (TEncodable): The index name for which the query is written. + index_name (TEncodable): The index name for which the query is written. query (TEncodable): The search query, same as the query passed as an argument to FT.SEARCH. options (Optional[FtAggregateOptions]): The optional arguments for the command. @@ -308,24 +326,24 @@ async def aggregate( Examples: >>> from glide import ft - >>> result = await ft.aggregate(glide_client, "myIndex", "*", FtAggregateOptions(loadFields=["__key"], clauses=[GroupBy(["@condition"], [Reducer("COUNT", [], "bicycles")])])) + >>> await ft.aggregate(glide_client, "myIndex", "*", FtAggregateOptions(loadFields=["__key"], clauses=[GroupBy(["@condition"], [Reducer("COUNT", [], "bicycles")])])) [{b'condition': b'refurbished', b'bicycles': b'1'}, {b'condition': b'new', b'bicycles': b'5'}, {b'condition': b'used', b'bicycles': b'4'}] """ - args: List[TEncodable] = [CommandNames.FT_AGGREGATE, indexName, query] + args: List[TEncodable] = [CommandNames.FT_AGGREGATE, index_name, query] if options: args.extend(options.to_args()) return cast(FtAggregateResponse, await client.custom_command(args)) async def profile( - client: TGlideClient, indexName: TEncodable, options: FtProfileOptions + client: TGlideClient, index_name: TEncodable, options: FtProfileOptions ) -> FtProfileResponse: """ Runs a search or aggregation query and collects performance profiling information. Args: client (TGlideClient): The client to execute the command. 
- indexName (TEncodable): The index name + index_name (TEncodable): The index name options (FtProfileOptions): Options for the command. Returns: @@ -333,7 +351,7 @@ async def profile( Examples: >>> ftSearchOptions = FtSeachOptions(return_fields=[ReturnField(field_identifier="a", alias="a_new"), ReturnField(field_identifier="b", alias="b_new")]) - >>> ftProfileResult = await ft.profile(glide_client, "myIndex", FtProfileOptions.from_query_options(query="*", queryOptions=ftSearchOptions)) + >>> await ft.profile(glide_client, "myIndex", FtProfileOptions.from_query_options(query="*", query_options=ftSearchOptions)) [ [ 2, @@ -357,5 +375,5 @@ async def profile( } ] """ - args: List[TEncodable] = [CommandNames.FT_PROFILE, indexName] + options.to_args() + args: List[TEncodable] = [CommandNames.FT_PROFILE, index_name] + options.to_args() return cast(FtProfileResponse, await client.custom_command(args)) diff --git a/python/python/glide/async_commands/server_modules/ft_options/ft_constants.py b/python/python/glide/async_commands/server_modules/ft_options/ft_constants.py index 15a978eac8..cd13ea02a7 100644 --- a/python/python/glide/async_commands/server_modules/ft_options/ft_constants.py +++ b/python/python/glide/async_commands/server_modules/ft_options/ft_constants.py @@ -8,6 +8,7 @@ class CommandNames: FT_CREATE = "FT.CREATE" FT_DROPINDEX = "FT.DROPINDEX" + FT_LIST = "FT._LIST" FT_SEARCH = "FT.SEARCH" FT_INFO = "FT.INFO" FT_ALIASADD = "FT.ALIASADD" @@ -42,7 +43,7 @@ class FtCreateKeywords: EF_RUNTIME = "EF_RUNTIME" -class FtSeachKeywords: +class FtSearchKeywords: """ Keywords used in the FT.SEARCH command.
""" diff --git a/python/python/glide/async_commands/server_modules/ft_options/ft_create_options.py b/python/python/glide/async_commands/server_modules/ft_options/ft_create_options.py index 90aa2d9fdf..551c160641 100644 --- a/python/python/glide/async_commands/server_modules/ft_options/ft_create_options.py +++ b/python/python/glide/async_commands/server_modules/ft_options/ft_create_options.py @@ -95,7 +95,7 @@ def __init__( Args: name (TEncodable): The name of the field. - type (FieldType): The type of the field. See `FieldType`. + type (FieldType): The type of the field. alias (Optional[TEncodable]): An alias for the field. """ self.name = name @@ -103,7 +103,7 @@ def __init__( self.alias = alias @abstractmethod - def toArgs(self) -> List[TEncodable]: + def to_args(self) -> List[TEncodable]: """ Get the arguments representing the field. @@ -132,14 +132,8 @@ def __init__(self, name: TEncodable, alias: Optional[TEncodable] = None): """ super().__init__(name, FieldType.TEXT, alias) - def toArgs(self) -> List[TEncodable]: - """ - Get the arguments representing the text field. - - Returns: - List[TEncodable]: A list of text field arguments. - """ - args = super().toArgs() + def to_args(self) -> List[TEncodable]: + args = super().to_args() return args @@ -172,14 +166,8 @@ def __init__( self.separator = separator self.case_sensitive = case_sensitive - def toArgs(self) -> List[TEncodable]: - """ - Get the arguments representing the tag field. - - Returns: - List[TEncodable]: A list of tag field arguments. - """ - args = super().toArgs() + def to_args(self) -> List[TEncodable]: + args = super().to_args() if self.separator: args.extend([FtCreateKeywords.SEPARATOR, self.separator]) if self.case_sensitive: @@ -202,14 +190,8 @@ def __init__(self, name: TEncodable, alias: Optional[TEncodable] = None): """ super().__init__(name, FieldType.NUMERIC, alias) - def toArgs(self) -> List[TEncodable]: - """ - Get the arguments representing the numeric field. 
- - Returns: - List[TEncodable]: A list of numeric field arguments. - """ - args = super().toArgs() + def to_args(self) -> List[TEncodable]: + args = super().to_args() return args @@ -235,7 +217,7 @@ def __init__( self.type = type @abstractmethod - def toArgs(self) -> List[TEncodable]: + def to_args(self) -> List[TEncodable]: """ Get the arguments to be used for the algorithm of the vector field. @@ -276,14 +258,8 @@ def __init__( super().__init__(dimensions, distance_metric, type) self.initial_cap = initial_cap - def toArgs(self) -> List[TEncodable]: - """ - Get the arguments representing the vector field created with FLAT algorithm. - - Returns: - List[TEncodable]: A list of FLAT algorithm type vector arguments. - """ - args = super().toArgs() + def to_args(self) -> List[TEncodable]: + args = super().to_args() if self.initial_cap: args.extend([FtCreateKeywords.INITIAL_CAP, str(self.initial_cap)]) return args @@ -322,14 +298,8 @@ def __init__( self.vectors_examined_on_construction = vectors_examined_on_construction self.vectors_examined_on_runtime = vectors_examined_on_runtime - def toArgs(self) -> List[TEncodable]: - """ - Get the arguments representing the vector field created with HSNW algorithm. - - Returns: - List[TEncodable]: A list of HNSW algorithm type vector arguments. - """ - args = super().toArgs() + def to_args(self) -> List[TEncodable]: + args = super().to_args() if self.initial_cap: args.extend([FtCreateKeywords.INITIAL_CAP, str(self.initial_cap)]) if self.number_of_edges: @@ -365,25 +335,19 @@ def __init__( Args: name (TEncodable): The name of the vector field. - algorithm (VectorAlgorithm): The vector indexing algorithm. See `VectorAlgorithm`. + algorithm (VectorAlgorithm): The vector indexing algorithm. alias (Optional[TEncodable]): An alias for the field. - attributes (VectorFieldAttributes): Additional attributes to be passed with the vector field after the algorithm name. See `VectorFieldAttributes`. 
+ attributes (VectorFieldAttributes): Additional attributes to be passed with the vector field after the algorithm name. """ super().__init__(name, FieldType.VECTOR, alias) self.algorithm = algorithm self.attributes = attributes - def toArgs(self) -> List[TEncodable]: - """ - Get the arguments representing the vector field. - - Returns: - List[TEncodable]: A list of vector field arguments. - """ - args = super().toArgs() + def to_args(self) -> List[TEncodable]: + args = super().to_args() args.append(self.algorithm.value) if self.attributes: - attribute_list = self.attributes.toArgs() + attribute_list = self.attributes.to_args() args.append(str(len(attribute_list))) args.extend(attribute_list) return args @@ -419,13 +383,13 @@ def __init__( Initialize the FT.CREATE optional fields. Args: - data_type (Optional[DataType]): The index data type. If not defined a `HASH` index is created. See `DataType`. + data_type (Optional[DataType]): The index data type. If not defined a `HASH` index is created. prefixes (Optional[List[TEncodable]]): A list of prefixes of index definitions. """ self.data_type = data_type self.prefixes = prefixes - def toArgs(self) -> List[TEncodable]: + def to_args(self) -> List[TEncodable]: """ Get the optional arguments for the FT.CREATE command. 
diff --git a/python/python/glide/async_commands/server_modules/ft_options/ft_profile_options.py b/python/python/glide/async_commands/server_modules/ft_options/ft_profile_options.py index d6ab8ceb7b..46bbab7b9f 100644 --- a/python/python/glide/async_commands/server_modules/ft_options/ft_profile_options.py +++ b/python/python/glide/async_commands/server_modules/ft_options/ft_profile_options.py @@ -9,7 +9,7 @@ FtProfileKeywords, ) from glide.async_commands.server_modules.ft_options.ft_search_options import ( - FtSeachOptions, + FtSearchOptions, ) from glide.constants import TEncodable @@ -37,8 +37,8 @@ class FtProfileOptions: def __init__( self, query: TEncodable, - queryType: QueryType, - queryOptions: Optional[Union[FtSeachOptions, FtAggregateOptions]] = None, + query_type: QueryType, + query_options: Optional[Union[FtSearchOptions, FtAggregateOptions]] = None, limited: Optional[bool] = False, ): """ @@ -46,20 +46,20 @@ def __init__( Args: query (TEncodable): The query that is being profiled. This is the query argument from the FT.AGGREGATE/FT.SEARCH command. - queryType (Optional[QueryType]): The type of query to be profiled. - queryOptions (Optional[Union[FtSeachOptions, FtAggregateOptions]]): The arguments/options for the FT.AGGREGATE/FT.SEARCH command being profiled. + query_type (Optional[QueryType]): The type of query to be profiled. + query_options (Optional[Union[FtSearchOptions, FtAggregateOptions]]): The arguments/options for the FT.AGGREGATE/FT.SEARCH command being profiled. limited (Optional[bool]): To provide some brief version of the output, otherwise a full verbose output is provided. 
""" self.query = query - self.queryType = queryType - self.queryOptions = queryOptions + self.query_type = query_type + self.query_options = query_options self.limited = limited @classmethod def from_query_options( cls, query: TEncodable, - queryOptions: Union[FtSeachOptions, FtAggregateOptions], + query_options: Union[FtSearchOptions, FtAggregateOptions], limited: Optional[bool] = False, ): """ @@ -67,27 +67,27 @@ def from_query_options( Args: query (TEncodable): The query that is being profiled. This is the query argument from the FT.AGGREGATE/FT.SEARCH command. - queryOptions (Optional[Union[FtSeachOptions, FtAggregateOptions]]): The arguments/options for the FT.AGGREGATE/FT.SEARCH command being profiled. + query_options (Optional[Union[FtSearchOptions, FtAggregateOptions]]): The arguments/options for the FT.AGGREGATE/FT.SEARCH command being profiled. limited (Optional[bool]): To provide some brief version of the output, otherwise a full verbose output is provided. """ - queryType: QueryType = QueryType.SEARCH - if type(queryOptions) == FtAggregateOptions: - queryType = QueryType.AGGREGATE - return cls(query, queryType, queryOptions, limited) + query_type: QueryType = QueryType.SEARCH + if type(query_options) == FtAggregateOptions: + query_type = QueryType.AGGREGATE + return cls(query, query_type, query_options, limited) @classmethod def from_query_type( - cls, query: TEncodable, queryType: QueryType, limited: Optional[bool] = False + cls, query: TEncodable, query_type: QueryType, limited: Optional[bool] = False ): """ A class method to create FtProfileOptions with QueryType. Args: query (TEncodable): The query that is being profiled. This is the query argument from the FT.AGGREGATE/FT.SEARCH command. - queryType (QueryType): The type of query to be profiled. + query_type (QueryType): The type of query to be profiled. limited (Optional[bool]): To provide some brief version of the output, otherwise a full verbose output is provided. 
""" - return cls(query, queryType, None, limited) + return cls(query, query_type, None, limited) def to_args(self) -> List[TEncodable]: """ @@ -96,13 +96,13 @@ def to_args(self) -> List[TEncodable]: Returns: List[TEncodable]: A list of remaining arguments for the FT.PROFILE command. """ - args: List[TEncodable] = [self.queryType.value] + args: List[TEncodable] = [self.query_type.value] if self.limited: args.append(FtProfileKeywords.LIMITED) args.extend([FtProfileKeywords.QUERY, self.query]) - if self.queryOptions: - if type(self.queryOptions) == FtAggregateOptions: - args.extend(cast(FtAggregateOptions, self.queryOptions).to_args()) + if self.query_options: + if type(self.query_options) == FtAggregateOptions: + args.extend(cast(FtAggregateOptions, self.query_options).to_args()) else: - args.extend(cast(FtSeachOptions, self.queryOptions).toArgs()) + args.extend(cast(FtSearchOptions, self.query_options).to_args()) return args diff --git a/python/python/glide/async_commands/server_modules/ft_options/ft_search_options.py b/python/python/glide/async_commands/server_modules/ft_options/ft_search_options.py index 79f5422edc..f76b309b0f 100644 --- a/python/python/glide/async_commands/server_modules/ft_options/ft_search_options.py +++ b/python/python/glide/async_commands/server_modules/ft_options/ft_search_options.py @@ -2,7 +2,7 @@ from typing import List, Mapping, Optional -from glide.async_commands.server_modules.ft_options.ft_constants import FtSeachKeywords +from glide.async_commands.server_modules.ft_options.ft_constants import FtSearchKeywords from glide.constants import TEncodable @@ -22,7 +22,7 @@ def __init__(self, offset: int, count: int): self.offset = offset self.count = count - def toArgs(self) -> List[TEncodable]: + def to_args(self) -> List[TEncodable]: """ Get the arguments for the LIMIT option of FT.SEARCH. @@ -30,7 +30,7 @@ def toArgs(self) -> List[TEncodable]: List[TEncodable]: A list of LIMIT option arguments. 
""" args: List[TEncodable] = [ - FtSeachKeywords.LIMIT, + FtSearchKeywords.LIMIT, str(self.offset), str(self.count), ] @@ -55,7 +55,7 @@ def __init__( self.field_identifier = field_identifier self.alias = alias - def toArgs(self) -> List[TEncodable]: + def to_args(self) -> List[TEncodable]: """ Get the arguments for the RETURN option of FT.SEARCH. @@ -64,12 +64,12 @@ def toArgs(self) -> List[TEncodable]: """ args: List[TEncodable] = [self.field_identifier] if self.alias: - args.append(FtSeachKeywords.AS) + args.append(FtSearchKeywords.AS) args.append(self.alias) return args -class FtSeachOptions: +class FtSearchOptions: """ This class represents the input options to be used in the FT.SEARCH command. All fields in this class are optional inputs for FT.SEARCH. @@ -99,7 +99,7 @@ def __init__( self.limit = limit self.count = count - def toArgs(self) -> List[TEncodable]: + def to_args(self) -> List[TEncodable]: """ Get the optional arguments for the FT.SEARCH command. @@ -109,23 +109,23 @@ def toArgs(self) -> List[TEncodable]: """ args: List[TEncodable] = [] if self.return_fields: - args.append(FtSeachKeywords.RETURN) + args.append(FtSearchKeywords.RETURN) return_field_args: List[TEncodable] = [] for return_field in self.return_fields: - return_field_args.extend(return_field.toArgs()) + return_field_args.extend(return_field.to_args()) args.append(str(len(return_field_args))) args.extend(return_field_args) if self.timeout: - args.append(FtSeachKeywords.TIMEOUT) + args.append(FtSearchKeywords.TIMEOUT) args.append(str(self.timeout)) if self.params: - args.append(FtSeachKeywords.PARAMS) + args.append(FtSearchKeywords.PARAMS) args.append(str(len(self.params))) for name, value in self.params.items(): args.append(name) args.append(value) if self.limit: - args.extend(self.limit.toArgs()) + args.extend(self.limit.to_args()) if self.count: - args.append(FtSeachKeywords.COUNT) + args.append(FtSearchKeywords.COUNT) return args diff --git 
a/python/python/glide/async_commands/server_modules/json.py b/python/python/glide/async_commands/server_modules/json.py index 3c98672d9f..466679873a 100644 --- a/python/python/glide/async_commands/server_modules/json.py +++ b/python/python/glide/async_commands/server_modules/json.py @@ -207,8 +207,8 @@ async def get( async def arrappend( client: TGlideClient, key: TEncodable, + path: TEncodable, values: List[TEncodable], - path: Optional[TEncodable] = None, ) -> TJsonResponse[int]: """ Appends one or more `values` to the JSON array at the specified `path` within the JSON document stored at `key`. @@ -216,19 +216,18 @@ async def arrappend( Args: client (TGlideClient): The client to execute the command. key (TEncodable): The key of the JSON document. + path (TEncodable): Represents the path within the JSON document where the `values` will be appended. values (TEncodable): The values to append to the JSON array at the specified path. - path (Optional[TEncodable]): Represents the path within the JSON document where the `values` will be appended. - Defaults to None. - **Beware**: For AWS ElastiCache/MemoryDB the `path` parameter is required and not optional. + JSON string values must be wrapped with quotes. For example, to append `"foo"`, pass `"\"foo\""`. Returns: TJsonResponse[int]: For JSONPath (`path` starts with `$`): - Returns a list of integer replies for every possible path, indicating the new length of the new array after appending `values`, + Returns a list of integer replies for every possible path, indicating the new length of the array after appending `values`, or None for JSON values matching the path that are not an array. If `path` doesn't exist, an empty array will be returned. For legacy path (`path` doesn't start with `$`): - Returns the length of the new array after appending `values` to the array at `path`. + Returns the length of the array after appending `values` to the array at `path`. 
If multiple paths match, the length of the first updated array is returned. If the JSON value at `path` is not a array or if `path` doesn't exist, an error is raised. If `key` doesn't exist, an error is raised. @@ -246,10 +245,7 @@ async def arrappend( >>> json.loads(await valkeyJson.get(client, "doc", ".")) {"a": 1, "b": ["one", "two", "three", "four"]} # Returns the updated JSON document """ - args = ["JSON.ARRAPPEND", key] - if path: - args.append(path) - args.extend(values) + args = ["JSON.ARRAPPEND", key, path] + values return cast(TJsonResponse[int], await client.custom_command(args)) diff --git a/python/python/tests/tests_server_modules/search/test_ft_create.py b/python/python/tests/tests_server_modules/search/test_ft_create.py deleted file mode 100644 index 6655fac0c0..0000000000 --- a/python/python/tests/tests_server_modules/search/test_ft_create.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 -import uuid -from typing import List - -import pytest -from glide.async_commands.server_modules import ft -from glide.async_commands.server_modules.ft_options.ft_create_options import ( - DataType, - DistanceMetricType, - Field, - FtCreateOptions, - NumericField, - TextField, - VectorAlgorithm, - VectorField, - VectorFieldAttributesHnsw, - VectorType, -) -from glide.config import ProtocolVersion -from glide.constants import OK, TEncodable -from glide.glide_client import GlideClusterClient - - -@pytest.mark.asyncio -class TestFtCreate: - @pytest.mark.parametrize("cluster_mode", [True]) - @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_ft_create(self, glide_client: GlideClusterClient): - fields: List[Field] = [] - textFieldTitle: TextField = TextField("$title") - numberField: NumericField = NumericField("$published_at") - textFieldCategory: TextField = TextField("$category") - fields.append(textFieldTitle) - fields.append(numberField) - 
fields.append(textFieldCategory) - - prefixes: List[TEncodable] = [] - prefixes.append("blog:post:") - - # Create an index with multiple fields with Hash data type. - index = str(uuid.uuid4()) - result = await ft.create( - glide_client, index, fields, FtCreateOptions(DataType.HASH, prefixes) - ) - assert result == OK - assert await ft.dropindex(glide_client, indexName=index) == OK - - # Create an index with multiple fields with JSON data type. - index2 = str(uuid.uuid4()) - result = await ft.create( - glide_client, index2, fields, FtCreateOptions(DataType.JSON, prefixes) - ) - assert result == OK - assert await ft.dropindex(glide_client, indexName=index2) == OK - - # Create an index for vectors of size 2 - # FT.CREATE hash_idx1 ON HASH PREFIX 1 hash: SCHEMA vec AS VEC VECTOR HNSW 6 DIM 2 TYPE FLOAT32 DISTANCE_METRIC L2 - index3 = str(uuid.uuid4()) - prefixes = [] - prefixes.append("hash:") - fields = [] - vectorFieldHash: VectorField = VectorField( - name="vec", - algorithm=VectorAlgorithm.HNSW, - attributes=VectorFieldAttributesHnsw( - dimensions=2, - distance_metric=DistanceMetricType.L2, - type=VectorType.FLOAT32, - ), - alias="VEC", - ) - fields.append(vectorFieldHash) - - result = await ft.create( - glide_client, index3, fields, FtCreateOptions(DataType.HASH, prefixes) - ) - assert result == OK - assert await ft.dropindex(glide_client, indexName=index3) == OK - - # Create a 6-dimensional JSON index using the HNSW algorithm - # FT.CREATE json_idx1 ON JSON PREFIX 1 json: SCHEMA $.vec AS VEC VECTOR HNSW 6 DIM 6 TYPE FLOAT32 DISTANCE_METRIC L2 - index4 = str(uuid.uuid4()) - prefixes = [] - prefixes.append("json:") - fields = [] - vectorFieldJson: VectorField = VectorField( - name="$.vec", - algorithm=VectorAlgorithm.HNSW, - attributes=VectorFieldAttributesHnsw( - dimensions=6, - distance_metric=DistanceMetricType.L2, - type=VectorType.FLOAT32, - ), - alias="VEC", - ) - fields.append(vectorFieldJson) - - result = await ft.create( - glide_client, index4, fields, 
FtCreateOptions(DataType.JSON, prefixes) - ) - assert result == OK - assert await ft.dropindex(glide_client, indexName=index4) == OK - - # Create an index without FtCreateOptions - - index5 = str(uuid.uuid4()) - result = await ft.create(glide_client, index5, fields, FtCreateOptions()) - assert result == OK - assert await ft.dropindex(glide_client, indexName=index5) == OK - - # TO-DO: - # Add additional tests from VSS documentation that require a combination of commands to run. diff --git a/python/python/tests/tests_server_modules/search/test_ft_dropindex.py b/python/python/tests/tests_server_modules/search/test_ft_dropindex.py deleted file mode 100644 index 717df38eb8..0000000000 --- a/python/python/tests/tests_server_modules/search/test_ft_dropindex.py +++ /dev/null @@ -1,44 +0,0 @@ -import uuid -from typing import List - -import pytest -from glide.async_commands.server_modules import ft -from glide.async_commands.server_modules.ft_options.ft_create_options import ( - DataType, - Field, - FtCreateOptions, - TextField, -) -from glide.config import ProtocolVersion -from glide.constants import OK, TEncodable -from glide.exceptions import RequestError -from glide.glide_client import GlideClusterClient - - -@pytest.mark.asyncio -class TestFtDropIndex: - @pytest.mark.parametrize("cluster_mode", [True]) - @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_ft_dropindex(self, glide_client: GlideClusterClient): - # Index name for the index to be dropped. - indexName = str(uuid.uuid4()) - - fields: List[Field] = [] - textFieldTitle: TextField = TextField("$title") - fields.append(textFieldTitle) - prefixes: List[TEncodable] = [] - prefixes.append("blog:post:") - - # Create an index with multiple fields with Hash data type. - result = await ft.create( - glide_client, indexName, fields, FtCreateOptions(DataType.HASH, prefixes) - ) - assert result == OK - - # Drop the index. Expects "OK" as a response. 
- result = await ft.dropindex(glide_client, indexName) - assert result == OK - - # Drop a non existent index. Expects a RequestError. - with pytest.raises(RequestError): - await ft.dropindex(glide_client, indexName) diff --git a/python/python/tests/tests_server_modules/search/test_ft_search.py b/python/python/tests/tests_server_modules/search/test_ft_search.py deleted file mode 100644 index bece8e1434..0000000000 --- a/python/python/tests/tests_server_modules/search/test_ft_search.py +++ /dev/null @@ -1,198 +0,0 @@ -# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - -import json -import time -import uuid -from typing import List, Mapping, Union, cast - -import pytest -from glide.async_commands.server_modules import ft -from glide.async_commands.server_modules import json as GlideJson -from glide.async_commands.server_modules.ft_options.ft_create_options import ( - DataType, - FtCreateOptions, - NumericField, -) -from glide.async_commands.server_modules.ft_options.ft_profile_options import ( - FtProfileOptions, -) -from glide.async_commands.server_modules.ft_options.ft_search_options import ( - FtSeachOptions, - ReturnField, -) -from glide.config import ProtocolVersion -from glide.constants import OK, FtSearchResponse, TEncodable -from glide.glide_client import GlideClusterClient - - -@pytest.mark.asyncio -class TestFtSearch: - sleep_wait_time = 0.5 # This value is in seconds - - @pytest.mark.parametrize("cluster_mode", [True]) - @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_ft_search(self, glide_client: GlideClusterClient): - prefix = "{json-search-" + str(uuid.uuid4()) + "}:" - json_key1 = prefix + str(uuid.uuid4()) - json_key2 = prefix + str(uuid.uuid4()) - json_value1 = {"a": 11111, "b": 2, "c": 3} - json_value2 = {"a": 22222, "b": 2, "c": 3} - prefixes: List[TEncodable] = [] - prefixes.append(prefix) - index = prefix + str(uuid.uuid4()) - - # Create an index. 
- assert ( - await ft.create( - glide_client, - index, - schema=[ - NumericField("$.a", "a"), - NumericField("$.b", "b"), - ], - options=FtCreateOptions(DataType.JSON), - ) - == OK - ) - - # Create a json key. - assert ( - await GlideJson.set(glide_client, json_key1, "$", json.dumps(json_value1)) - == OK - ) - assert ( - await GlideJson.set(glide_client, json_key2, "$", json.dumps(json_value2)) - == OK - ) - - # Wait for index to be updated to avoid this error - ResponseError: The index is under construction. - time.sleep(self.sleep_wait_time) - - ftSearchOptions = FtSeachOptions( - return_fields=[ - ReturnField(field_identifier="a", alias="a_new"), - ReturnField(field_identifier="b", alias="b_new"), - ] - ) - - # Search the index for string inputs. - result1 = await ft.search(glide_client, index, "*", options=ftSearchOptions) - # Check if we get the expected result from ft.search for string inputs. - TestFtSearch._ft_search_deep_compare_result( - self, - result=result1, - json_key1=json_key1, - json_key2=json_key2, - json_value1=json_value1, - json_value2=json_value2, - fieldName1="a", - fieldName2="b", - ) - - # Test FT.PROFILE for the above mentioned FT.SEARCH query and search options. - - ftProfileResult = await ft.profile( - glide_client, - index, - FtProfileOptions.from_query_options( - query="*", queryOptions=ftSearchOptions - ), - ) - print(ftProfileResult) - assert len(ftProfileResult) > 0 - - # Check if we get the expected result from FT.PROFILE for string inputs. - TestFtSearch._ft_search_deep_compare_result( - self, - result=cast(FtSearchResponse, ftProfileResult[0]), - json_key1=json_key1, - json_key2=json_key2, - json_value1=json_value1, - json_value2=json_value2, - fieldName1="a", - fieldName2="b", - ) - ftSearchOptionsByteInput = FtSeachOptions( - return_fields=[ - ReturnField(field_identifier=b"a", alias=b"a_new"), - ReturnField(field_identifier=b"b", alias=b"b_new"), - ] - ) - - # Search the index for byte type inputs. 
- result2 = await ft.search( - glide_client, bytes(index, "utf-8"), b"*", options=ftSearchOptionsByteInput - ) - - # Check if we get the expected result from ft.search for byte type inputs. - TestFtSearch._ft_search_deep_compare_result( - self, - result=result2, - json_key1=json_key1, - json_key2=json_key2, - json_value1=json_value1, - json_value2=json_value2, - fieldName1="a", - fieldName2="b", - ) - - # Test FT.PROFILE for the above mentioned FT.SEARCH query and search options for byte type inputs. - ftProfileResult = await ft.profile( - glide_client, - index, - FtProfileOptions.from_query_options( - query=b"*", queryOptions=ftSearchOptionsByteInput - ), - ) - assert len(ftProfileResult) > 0 - - # Check if we get the expected result from FT.PROFILE for byte type inputs. - TestFtSearch._ft_search_deep_compare_result( - self, - result=cast(FtSearchResponse, ftProfileResult[0]), - json_key1=json_key1, - json_key2=json_key2, - json_value1=json_value1, - json_value2=json_value2, - fieldName1="a", - fieldName2="b", - ) - - assert await ft.dropindex(glide_client, indexName=index) == OK - - def _ft_search_deep_compare_result( - self, - result: List[Union[int, Mapping[TEncodable, Mapping[TEncodable, TEncodable]]]], - json_key1: str, - json_key2: str, - json_value1: dict, - json_value2: dict, - fieldName1: str, - fieldName2: str, - ): - """ - Deep compare the keys and values in FT.SEARCH result array. - - Args: - result (List[Union[int, Mapping[TEncodable, Mapping[TEncodable, TEncodable]]]]): - json_key1 (str): The first key in search result. - json_key2 (str): The second key in the search result. - json_value1 (dict): The fields map for first key in the search result. - json_value2 (dict): The fields map for second key in the search result. 
- """ - assert len(result) == 2 - assert result[0] == 2 - searchResultMap: Mapping[TEncodable, Mapping[TEncodable, TEncodable]] = cast( - Mapping[TEncodable, Mapping[TEncodable, TEncodable]], result[1] - ) - expectedResultMap: Mapping[TEncodable, Mapping[TEncodable, TEncodable]] = { - json_key1.encode(): { - fieldName1.encode(): str(json_value1.get(fieldName1)).encode(), - fieldName2.encode(): str(json_value1.get(fieldName2)).encode(), - }, - json_key2.encode(): { - fieldName1.encode(): str(json_value2.get(fieldName1)).encode(), - fieldName2.encode(): str(json_value2.get(fieldName2)).encode(), - }, - } - assert searchResultMap == expectedResultMap diff --git a/python/python/tests/tests_server_modules/test_ft.py b/python/python/tests/tests_server_modules/test_ft.py index 5c49e5e7c2..a4f2fa099e 100644 --- a/python/python/tests/tests_server_modules/test_ft.py +++ b/python/python/tests/tests_server_modules/test_ft.py @@ -1,4 +1,6 @@ # Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + +import json import time import uuid from typing import List, Mapping, Union, cast @@ -9,7 +11,6 @@ from glide.async_commands.server_modules import json as GlideJson from glide.async_commands.server_modules.ft_options.ft_aggregate_options import ( FtAggregateApply, - FtAggregateClause, FtAggregateGroupBy, FtAggregateOptions, FtAggregateReducer, @@ -32,8 +33,12 @@ from glide.async_commands.server_modules.ft_options.ft_profile_options import ( FtProfileOptions, ) +from glide.async_commands.server_modules.ft_options.ft_search_options import ( + FtSearchOptions, + ReturnField, +) from glide.config import ProtocolVersion -from glide.constants import OK, TEncodable +from glide.constants import OK, FtSearchResponse, TEncodable from glide.exceptions import RequestError from glide.glide_client import GlideClusterClient @@ -53,86 +58,425 @@ class TestFt: sleep_wait_time = 1 # This value is in seconds + @pytest.mark.parametrize("cluster_mode", [True]) + 
@pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_ft_create(self, glide_client: GlideClusterClient): + fields: List[Field] = [ + TextField("$title"), + NumericField("$published_at"), + TextField("$category"), + ] + prefixes: List[TEncodable] = ["blog:post:"] + + # Create an index with multiple fields with Hash data type. + index = str(uuid.uuid4()) + assert ( + await ft.create( + glide_client, index, fields, FtCreateOptions(DataType.HASH, prefixes) + ) + == OK + ) + assert await ft.dropindex(glide_client, index) == OK + + # Create an index with multiple fields with JSON data type. + index2 = str(uuid.uuid4()) + assert ( + await ft.create( + glide_client, index2, fields, FtCreateOptions(DataType.JSON, prefixes) + ) + == OK + ) + assert await ft.dropindex(glide_client, index2) == OK + + # Create an index for vectors of size 2 + # FT.CREATE hash_idx1 ON HASH PREFIX 1 hash: SCHEMA vec AS VEC VECTOR HNSW 6 DIM 2 TYPE FLOAT32 DISTANCE_METRIC L2 + index3 = str(uuid.uuid4()) + prefixes = ["hash:"] + fields = [ + VectorField( + name="vec", + algorithm=VectorAlgorithm.HNSW, + attributes=VectorFieldAttributesHnsw( + dimensions=2, + distance_metric=DistanceMetricType.L2, + type=VectorType.FLOAT32, + ), + alias="VEC", + ) + ] + + assert ( + await ft.create( + glide_client, index3, fields, FtCreateOptions(DataType.HASH, prefixes) + ) + == OK + ) + assert await ft.dropindex(glide_client, index3) == OK + + # Create a 6-dimensional JSON index using the HNSW algorithm + # FT.CREATE json_idx1 ON JSON PREFIX 1 json: SCHEMA $.vec AS VEC VECTOR HNSW 6 DIM 6 TYPE FLOAT32 DISTANCE_METRIC L2 + index4 = str(uuid.uuid4()) + prefixes = ["json:"] + fields = [ + VectorField( + name="$.vec", + algorithm=VectorAlgorithm.HNSW, + attributes=VectorFieldAttributesHnsw( + dimensions=6, + distance_metric=DistanceMetricType.L2, + type=VectorType.FLOAT32, + ), + alias="VEC", + ) + ] + + assert ( + await ft.create( + glide_client, index4, fields, 
FtCreateOptions(DataType.JSON, prefixes) + ) + == OK + ) + assert await ft.dropindex(glide_client, index4) == OK + + # Create an index without FtCreateOptions + + index5 = str(uuid.uuid4()) + assert await ft.create(glide_client, index5, fields, FtCreateOptions()) == OK + assert await ft.dropindex(glide_client, index5) == OK + + @pytest.mark.parametrize("cluster_mode", [True]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_ft_create_byte_type_input(self, glide_client: GlideClusterClient): + fields: List[Field] = [ + TextField(b"$title"), + NumericField(b"$published_at"), + TextField(b"$category"), + ] + prefixes: List[TEncodable] = [b"blog:post:"] + + # Create an index with multiple fields with Hash data type with byte type input. + index = str(uuid.uuid4()) + assert ( + await ft.create( + glide_client, + index.encode("utf-8"), + fields, + FtCreateOptions(DataType.HASH, prefixes), + ) + == OK + ) + assert await ft.dropindex(glide_client, index) == OK + + @pytest.mark.parametrize("cluster_mode", [True]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_ft_dropindex(self, glide_client: GlideClusterClient): + # Index name for the index to be dropped. + index_name = str(uuid.uuid4()) + fields: List[Field] = [TextField("$title")] + prefixes: List[TEncodable] = ["blog:post:"] + + # Create an index with multiple fields with Hash data type. + assert ( + await ft.create( + glide_client, + index_name, + fields, + FtCreateOptions(DataType.HASH, prefixes), + ) + == OK + ) + + # Drop the index. Expects "OK" as a response. 
+ assert await ft.dropindex(glide_client, index_name) == OK + + # Create an index with multiple fields with Hash data type for byte type testing + index_name_for_bytes_type_input = str(uuid.uuid4()) + assert ( + await ft.create( + glide_client, + index_name_for_bytes_type_input, + fields, + FtCreateOptions(DataType.HASH, prefixes), + ) + == OK + ) + + # Drop the index. Expects "OK" as a response. + assert ( + await ft.dropindex( + glide_client, index_name_for_bytes_type_input.encode("utf-8") + ) + == OK + ) + + # Drop a non existent index. Expects a RequestError. + with pytest.raises(RequestError): + await ft.dropindex(glide_client, index_name) + + @pytest.mark.parametrize("cluster_mode", [True]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_ft_search(self, glide_client: GlideClusterClient): + prefix = "{json-search-" + str(uuid.uuid4()) + "}:" + json_key1 = prefix + str(uuid.uuid4()) + json_key2 = prefix + str(uuid.uuid4()) + json_value1 = {"a": 11111, "b": 2, "c": 3} + json_value2 = {"a": 22222, "b": 2, "c": 3} + index = prefix + str(uuid.uuid4()) + + # Create an index. + assert ( + await ft.create( + glide_client, + index, + schema=[ + NumericField("$.a", "a"), + NumericField("$.b", "b"), + ], + options=FtCreateOptions(DataType.JSON), + ) + == OK + ) + + # Create a json key. + assert ( + await GlideJson.set(glide_client, json_key1, "$", json.dumps(json_value1)) + == OK + ) + assert ( + await GlideJson.set(glide_client, json_key2, "$", json.dumps(json_value2)) + == OK + ) + + # Wait for index to be updated to avoid this error - ResponseError: The index is under construction. + time.sleep(self.sleep_wait_time) + + ft_search_options = FtSearchOptions( + return_fields=[ + ReturnField(field_identifier="a", alias="a_new"), + ReturnField(field_identifier="b", alias="b_new"), + ] + ) + + # Search the index for string inputs. 
+ result1 = await ft.search(glide_client, index, "*", options=ft_search_options) + # Check if we get the expected result from ft.search for string inputs. + TestFt._ft_search_deep_compare_result( + self, + result=result1, + json_key1=json_key1, + json_key2=json_key2, + json_value1=json_value1, + json_value2=json_value2, + fieldName1="a", + fieldName2="b", + ) + + # Test FT.PROFILE for the above mentioned FT.SEARCH query and search options. + + ft_profile_result = await ft.profile( + glide_client, + index, + FtProfileOptions.from_query_options( + query="*", query_options=ft_search_options + ), + ) + assert len(ft_profile_result) > 0 + + # Check if we get the expected result from FT.PROFILE for string inputs. + TestFt._ft_search_deep_compare_result( + self, + result=cast(FtSearchResponse, ft_profile_result[0]), + json_key1=json_key1, + json_key2=json_key2, + json_value1=json_value1, + json_value2=json_value2, + fieldName1="a", + fieldName2="b", + ) + ft_search_options_bytes_input = FtSearchOptions( + return_fields=[ + ReturnField(field_identifier=b"a", alias=b"a_new"), + ReturnField(field_identifier=b"b", alias=b"b_new"), + ] + ) + + # Search the index for byte type inputs. + result2 = await ft.search( + glide_client, + index.encode("utf-8"), + b"*", + options=ft_search_options_bytes_input, + ) + + # Check if we get the expected result from ft.search for byte type inputs. + TestFt._ft_search_deep_compare_result( + self, + result=result2, + json_key1=json_key1, + json_key2=json_key2, + json_value1=json_value1, + json_value2=json_value2, + fieldName1="a", + fieldName2="b", + ) + + # Test FT.PROFILE for the above mentioned FT.SEARCH query and search options for byte type inputs. 
+ ft_profile_result = await ft.profile( + glide_client, + index.encode("utf-8"), + FtProfileOptions.from_query_options( + query=b"*", query_options=ft_search_options_bytes_input + ), + ) + assert len(ft_profile_result) > 0 + + # Check if we get the expected result from FT.PROFILE for byte type inputs. + TestFt._ft_search_deep_compare_result( + self, + result=cast(FtSearchResponse, ft_profile_result[0]), + json_key1=json_key1, + json_key2=json_key2, + json_value1=json_value1, + json_value2=json_value2, + fieldName1="a", + fieldName2="b", + ) + + assert await ft.dropindex(glide_client, index) == OK + + def _ft_search_deep_compare_result( + self, + result: List[Union[int, Mapping[TEncodable, Mapping[TEncodable, TEncodable]]]], + json_key1: str, + json_key2: str, + json_value1: dict, + json_value2: dict, + fieldName1: str, + fieldName2: str, + ): + """ + Deep compare the keys and values in FT.SEARCH result array. + + Args: + result (List[Union[int, Mapping[TEncodable, Mapping[TEncodable, TEncodable]]]]): + json_key1 (str): The first key in search result. + json_key2 (str): The second key in the search result. + json_value1 (dict): The fields map for first key in the search result. + json_value2 (dict): The fields map for second key in the search result. 
+ """ + assert len(result) == 2 + assert result[0] == 2 + search_result_map: Mapping[TEncodable, Mapping[TEncodable, TEncodable]] = cast( + Mapping[TEncodable, Mapping[TEncodable, TEncodable]], result[1] + ) + expected_result_map: Mapping[TEncodable, Mapping[TEncodable, TEncodable]] = { + json_key1.encode(): { + fieldName1.encode(): str(json_value1.get(fieldName1)).encode(), + fieldName2.encode(): str(json_value1.get(fieldName2)).encode(), + }, + json_key2.encode(): { + fieldName1.encode(): str(json_value2.get(fieldName1)).encode(), + fieldName2.encode(): str(json_value2.get(fieldName2)).encode(), + }, + } + assert search_result_map == expected_result_map + @pytest.mark.parametrize("cluster_mode", [True]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_ft_aliasadd(self, glide_client: GlideClusterClient): - indexName: str = str(uuid.uuid4()) + index_name: str = str(uuid.uuid4()) alias: str = "alias" # Test ft.aliasadd throws an error if index does not exist. with pytest.raises(RequestError): - await ft.aliasadd(glide_client, alias, indexName) + await ft.aliasadd(glide_client, alias, index_name) # Test ft.aliasadd successfully adds an alias to an existing index. - await TestFt._create_test_index_hash_type(self, glide_client, indexName) - assert await ft.aliasadd(glide_client, alias, indexName) == OK - assert await ft.dropindex(glide_client, indexName=indexName) == OK + await TestFt._create_test_index_hash_type(self, glide_client, index_name) + assert await ft.aliasadd(glide_client, alias, index_name) == OK + assert await ft.dropindex(glide_client, index_name) == OK # Test ft.aliasadd for input of bytes type. 
- indexNameString = str(uuid.uuid4()) - indexNameBytes = bytes(indexNameString, "utf-8") - aliasNameBytes = b"alias-bytes" - await TestFt._create_test_index_hash_type(self, glide_client, indexNameString) - assert await ft.aliasadd(glide_client, aliasNameBytes, indexNameBytes) == OK - assert await ft.dropindex(glide_client, indexName=indexNameString) == OK + index_name_string = str(uuid.uuid4()) + index_names_bytes = index_name_string.encode("utf-8") + alias_name_bytes = b"alias-bytes" + await TestFt._create_test_index_hash_type(self, glide_client, index_name_string) + assert ( + await ft.aliasadd(glide_client, alias_name_bytes, index_names_bytes) == OK + ) + assert await ft.dropindex(glide_client, index_name_string) == OK @pytest.mark.parametrize("cluster_mode", [True]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_ft_aliasdel(self, glide_client: GlideClusterClient): - indexName: TEncodable = str(uuid.uuid4()) + index_name: TEncodable = str(uuid.uuid4()) alias: str = "alias" - await TestFt._create_test_index_hash_type(self, glide_client, indexName) + await TestFt._create_test_index_hash_type(self, glide_client, index_name) # Test if deleting a non existent alias throws an error. with pytest.raises(RequestError): await ft.aliasdel(glide_client, alias) # Test if an existing alias is deleted successfully. - assert await ft.aliasadd(glide_client, alias, indexName) == OK + assert await ft.aliasadd(glide_client, alias, index_name) == OK assert await ft.aliasdel(glide_client, alias) == OK # Test if an existing alias is deleted successfully for bytes type input. 
- assert await ft.aliasadd(glide_client, alias, indexName) == OK - assert await ft.aliasdel(glide_client, bytes(alias, "utf-8")) == OK + assert await ft.aliasadd(glide_client, alias, index_name) == OK + assert await ft.aliasdel(glide_client, alias.encode("utf-8")) == OK - assert await ft.dropindex(glide_client, indexName=indexName) == OK + assert await ft.dropindex(glide_client, index_name) == OK @pytest.mark.parametrize("cluster_mode", [True]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_ft_aliasupdate(self, glide_client: GlideClusterClient): - indexName: str = str(uuid.uuid4()) + index_name: str = str(uuid.uuid4()) alias: str = "alias" - await TestFt._create_test_index_hash_type(self, glide_client, indexName) - assert await ft.aliasadd(glide_client, alias, indexName) == OK - newAliasName: str = "newAlias" - newIndexName: str = str(uuid.uuid4()) + await TestFt._create_test_index_hash_type(self, glide_client, index_name) + assert await ft.aliasadd(glide_client, alias, index_name) == OK + new_alias_name: str = "newAlias" + new_index_name: str = str(uuid.uuid4()) - await TestFt._create_test_index_hash_type(self, glide_client, newIndexName) - assert await ft.aliasadd(glide_client, newAliasName, newIndexName) == OK + await TestFt._create_test_index_hash_type(self, glide_client, new_index_name) + assert await ft.aliasadd(glide_client, new_alias_name, new_index_name) == OK # Test if updating an already existing alias to point to an existing index returns "OK". 
- assert await ft.aliasupdate(glide_client, newAliasName, indexName) == OK + assert await ft.aliasupdate(glide_client, new_alias_name, index_name) == OK assert ( await ft.aliasupdate( - glide_client, bytes(alias, "utf-8"), bytes(newIndexName, "utf-8") + glide_client, alias.encode("utf-8"), new_index_name.encode("utf-8") ) == OK ) - assert await ft.dropindex(glide_client, indexName=indexName) == OK - assert await ft.dropindex(glide_client, indexName=newIndexName) == OK + assert await ft.dropindex(glide_client, index_name) == OK + assert await ft.dropindex(glide_client, new_index_name) == OK + + @pytest.mark.parametrize("cluster_mode", [True]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_ft_dropindex_ft_list(self, glide_client: GlideClusterClient): + indexName = str(uuid.uuid4()).encode() + await TestFt._create_test_index_hash_type(self, glide_client, indexName) + + before = await ft.list(glide_client) + assert indexName in before + + assert await ft.dropindex(glide_client, indexName) == OK + after = await ft.list(glide_client) + assert indexName not in after + + assert {_ for _ in after + [indexName]} == {_ for _ in before} + + # Drop a non existent index. Expects a RequestError. + with pytest.raises(RequestError): + await ft.dropindex(glide_client, indexName) async def _create_test_index_hash_type( self, glide_client: GlideClusterClient, index_name: TEncodable ): # Helper function used for creating a basic index with hash data type with one text field. 
- fields: List[Field] = [] - text_field_title: TextField = TextField("title") - fields.append(text_field_title) - + fields: List[Field] = [TextField("title")] prefix = "{hash-search-" + str(uuid.uuid4()) + "}:" - prefixes: List[TEncodable] = [] - prefixes.append(prefix) - + prefixes: List[TEncodable] = [prefix] result = await ft.create( glide_client, index_name, fields, FtCreateOptions(DataType.HASH, prefixes) ) @@ -141,14 +485,29 @@ async def _create_test_index_hash_type( @pytest.mark.parametrize("cluster_mode", [True]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_ft_info(self, glide_client: GlideClusterClient): - indexName = str(uuid.uuid4()) + index_name = str(uuid.uuid4()) await TestFt._create_test_index_with_vector_field( - self, glide_client=glide_client, index_name=indexName + self, glide_client, index_name ) - result = await ft.info(glide_client, indexName) - assert await ft.dropindex(glide_client, indexName=indexName) == OK + result = await ft.info(glide_client, index_name) + assert await ft.dropindex(glide_client, index_name) == OK + TestFt._ft_info_deep_compare_result(self, index_name, result) + + # Test for bytes type input. + index_name_for_bytes_input = str(uuid.uuid4()) + await TestFt._create_test_index_with_vector_field( + self, glide_client, index_name_for_bytes_input + ) + result = await ft.info(glide_client, index_name_for_bytes_input.encode("utf-8")) + assert await ft.dropindex(glide_client, index_name_for_bytes_input) == OK + TestFt._ft_info_deep_compare_result(self, index_name_for_bytes_input, result) + + # Querying a missing index throws an error. 
+ with pytest.raises(RequestError): + await ft.info(glide_client, str(uuid.uuid4())) - assert indexName.encode() == result.get(b"index_name") + def _ft_info_deep_compare_result(self, index_name: str, result): + assert index_name.encode() == result.get(b"index_name") assert b"JSON" == result.get(b"key_type") assert [b"key-prefix"] == result.get(b"key_prefixes") @@ -157,65 +516,58 @@ async def test_ft_info(self, glide_client: GlideClusterClient): TestFt.SerchResultFieldsList, result.get(b"fields") ) assert len(fields) == 2 - textField: TestFt.SearchResultField = {} - vectorField: TestFt.SearchResultField = {} + text_field: TestFt.SearchResultField = {} + vector_field: TestFt.SearchResultField = {} if fields[0].get(b"type") == b"VECTOR": - vectorField = cast(TestFt.SearchResultField, fields[0]) - textField = cast(TestFt.SearchResultField, fields[1]) + vector_field = cast(TestFt.SearchResultField, fields[0]) + text_field = cast(TestFt.SearchResultField, fields[1]) else: - vectorField = cast(TestFt.SearchResultField, fields[1]) - textField = cast(TestFt.SearchResultField, fields[0]) + vector_field = cast(TestFt.SearchResultField, fields[1]) + text_field = cast(TestFt.SearchResultField, fields[0]) # Compare vector field arguments - assert b"$.vec" == vectorField.get(b"identifier") - assert b"VECTOR" == vectorField.get(b"type") - assert b"VEC" == vectorField.get(b"field_name") - vectorFieldParams: Mapping[TEncodable, Union[TEncodable, int]] = cast( + assert b"$.vec" == vector_field.get(b"identifier") + assert b"VECTOR" == vector_field.get(b"type") + assert b"VEC" == vector_field.get(b"field_name") + vector_field_params: Mapping[TEncodable, Union[TEncodable, int]] = cast( Mapping[TEncodable, Union[TEncodable, int]], - vectorField.get(b"vector_params"), + vector_field.get(b"vector_params"), ) - assert DistanceMetricType.L2.value.encode() == vectorFieldParams.get( + assert DistanceMetricType.L2.value.encode() == vector_field_params.get( b"distance_metric" ) - assert 2 == 
vectorFieldParams.get(b"dimension") - assert b"HNSW" == vectorFieldParams.get(b"algorithm") - assert b"FLOAT32" == vectorFieldParams.get(b"data_type") + assert 2 == vector_field_params.get(b"dimension") + assert b"HNSW" == vector_field_params.get(b"algorithm") + assert b"FLOAT32" == vector_field_params.get(b"data_type") # Compare text field arguments. - assert b"$.text-field" == textField.get(b"identifier") - assert b"TEXT" == textField.get(b"type") - assert b"text-field" == textField.get(b"field_name") - - # Querying a missing index throws an error. - with pytest.raises(RequestError): - await ft.info(glide_client, str(uuid.uuid4())) + assert b"$.text-field" == text_field.get(b"identifier") + assert b"TEXT" == text_field.get(b"type") + assert b"text-field" == text_field.get(b"field_name") async def _create_test_index_with_vector_field( self, glide_client: GlideClusterClient, index_name: TEncodable ): # Helper function used for creating an index with JSON data type with a text and vector field. 
- fields: List[Field] = [] - textField: Field = TextField("$.text-field", "text-field") - - vectorFieldHash: VectorField = VectorField( - name="$.vec", - algorithm=VectorAlgorithm.HNSW, - attributes=VectorFieldAttributesHnsw( - dimensions=2, - distance_metric=DistanceMetricType.L2, - type=VectorType.FLOAT32, + fields: List[Field] = [ + VectorField( + name="$.vec", + algorithm=VectorAlgorithm.HNSW, + attributes=VectorFieldAttributesHnsw( + dimensions=2, + distance_metric=DistanceMetricType.L2, + type=VectorType.FLOAT32, + ), + alias="VEC", ), - alias="VEC", - ) - fields.append(vectorFieldHash) - fields.append(textField) + TextField("$.text-field", "text-field"), + ] - prefixes: List[TEncodable] = [] - prefixes.append("key-prefix") + prefixes: List[TEncodable] = ["key-prefix"] await ft.create( glide_client, - indexName=index_name, + index_name, schema=fields, options=FtCreateOptions(DataType.JSON, prefixes=prefixes), ) @@ -223,76 +575,76 @@ async def _create_test_index_with_vector_field( @pytest.mark.parametrize("cluster_mode", [True]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_ft_explain(self, glide_client: GlideClusterClient): - indexName = str(uuid.uuid4()) + index_name = str(uuid.uuid4()) await TestFt._create_test_index_for_ft_explain_commands( - self=self, glide_client=glide_client, index_name=indexName + self, glide_client, index_name ) # FT.EXPLAIN on a search query containing numeric field. 
query = "@price:[0 10]" - result = await ft.explain(glide_client, indexName=indexName, query=query) - resultString = cast(bytes, result).decode(encoding="utf-8") - assert "price" in resultString and "0" in resultString and "10" in resultString + result = await ft.explain(glide_client, index_name, query) + result_string = cast(bytes, result).decode(encoding="utf-8") + assert ( + "price" in result_string and "0" in result_string and "10" in result_string + ) # FT.EXPLAIN on a search query containing numeric field and having bytes type input to the command. - result = await ft.explain( - glide_client, indexName=indexName.encode(), query=query.encode() + result = await ft.explain(glide_client, index_name.encode(), query.encode()) + result_string = cast(bytes, result).decode(encoding="utf-8") + assert ( + "price" in result_string and "0" in result_string and "10" in result_string ) - resultString = cast(bytes, result).decode(encoding="utf-8") - assert "price" in resultString and "0" in resultString and "10" in resultString # FT.EXPLAIN on a search query that returns all data. - result = await ft.explain(glide_client, indexName=indexName, query="*") - resultString = cast(bytes, result).decode(encoding="utf-8") - assert "*" in resultString + result = await ft.explain(glide_client, index_name, query="*") + result_string = cast(bytes, result).decode(encoding="utf-8") + assert "*" in result_string - assert await ft.dropindex(glide_client, indexName=indexName) + assert await ft.dropindex(glide_client, index_name) # FT.EXPLAIN on a missing index throws an error. 
with pytest.raises(RequestError): - await ft.explain(glide_client, str(uuid.uuid4()), "*") + await ft.explain(glide_client, str(uuid.uuid4()), query="*") @pytest.mark.parametrize("cluster_mode", [True]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_ft_explaincli(self, glide_client: GlideClusterClient): - indexName = str(uuid.uuid4()) + index_name = str(uuid.uuid4()) await TestFt._create_test_index_for_ft_explain_commands( - self=self, glide_client=glide_client, index_name=indexName + self, glide_client, index_name ) # FT.EXPLAINCLI on a search query containing numeric field. query = "@price:[0 10]" - result = await ft.explaincli(glide_client, indexName=indexName, query=query) - resultStringArr = [] + result = await ft.explaincli(glide_client, index_name, query) + result_string_arr = [] for i in result: - resultStringArr.append(cast(bytes, i).decode(encoding="utf-8").strip()) + result_string_arr.append(cast(bytes, i).decode(encoding="utf-8").strip()) assert ( - "price" in resultStringArr - and "0" in resultStringArr - and "10" in resultStringArr + "price" in result_string_arr + and "0" in result_string_arr + and "10" in result_string_arr ) # FT.EXPLAINCLI on a search query containing numeric field and having bytes type input to the command. - result = await ft.explaincli( - glide_client, indexName=indexName.encode(), query=query.encode() - ) - resultStringArr = [] + result = await ft.explaincli(glide_client, index_name.encode(), query.encode()) + result_string_arr = [] for i in result: - resultStringArr.append(cast(bytes, i).decode(encoding="utf-8").strip()) + result_string_arr.append(cast(bytes, i).decode(encoding="utf-8").strip()) assert ( - "price" in resultStringArr - and "0" in resultStringArr - and "10" in resultStringArr + "price" in result_string_arr + and "0" in result_string_arr + and "10" in result_string_arr ) # FT.EXPLAINCLI on a search query that returns all data. 
- result = await ft.explaincli(glide_client, indexName=indexName, query="*") - resultStringArr = [] + result = await ft.explaincli(glide_client, index_name, query="*") + result_string_arr = [] for i in result: - resultStringArr.append(cast(bytes, i).decode(encoding="utf-8").strip()) - assert "*" in resultStringArr + result_string_arr.append(cast(bytes, i).decode(encoding="utf-8").strip()) + assert "*" in result_string_arr - assert await ft.dropindex(glide_client, indexName=indexName) + assert await ft.dropindex(glide_client, index_name) # FT.EXPLAINCLI on a missing index throws an error. with pytest.raises(RequestError): @@ -302,15 +654,9 @@ async def _create_test_index_for_ft_explain_commands( self, glide_client: GlideClusterClient, index_name: TEncodable ): # Helper function used for creating an index having hash data type, one text field and one numeric field. - fields: List[Field] = [] - numeric_field: NumericField = NumericField("price") - text_field: TextField = TextField("title") - fields.append(text_field) - fields.append(numeric_field) - + fields: List[Field] = [TextField("title"), NumericField("price")] prefix = "{hash-search-" + str(uuid.uuid4()) + "}:" - prefixes: List[TEncodable] = [] - prefixes.append(prefix) + prefixes: List[TEncodable] = [prefix] assert ( await ft.create( @@ -327,20 +673,20 @@ async def _create_test_index_for_ft_explain_commands( async def test_ft_aggregate_with_bicycles_data( self, glide_client: GlideClusterClient, protocol ): - prefixBicycles = "{bicycles}:" - indexBicycles = prefixBicycles + str(uuid.uuid4()) + prefix_bicycles = "{bicycles}:" + index_bicycles = prefix_bicycles + str(uuid.uuid4()) await TestFt._create_index_for_ft_aggregate_with_bicycles_data( - self=self, - glide_client=glide_client, - index_name=indexBicycles, - prefix=prefixBicycles, + self, + glide_client, + index_bicycles, + prefix_bicycles, ) await TestFt._create_json_keys_for_ft_aggregate_with_bicycles_data( - self=self, glide_client=glide_client, 
prefix=prefixBicycles + self, glide_client, prefix_bicycles ) time.sleep(self.sleep_wait_time) - ftAggregateOptions: FtAggregateOptions = FtAggregateOptions( + ft_aggregate_options: FtAggregateOptions = FtAggregateOptions( loadFields=["__key"], clauses=[ FtAggregateGroupBy( @@ -352,12 +698,12 @@ async def test_ft_aggregate_with_bicycles_data( # Run FT.AGGREGATE command with the following arguments: ['FT.AGGREGATE', '{bicycles}:1e15faab-a870-488e-b6cd-f2b76c6916a3', '*', 'LOAD', '1', '__key', 'GROUPBY', '1', '@condition', 'REDUCE', 'COUNT', '0', 'AS', 'bicycles'] result = await ft.aggregate( glide_client, - indexName=indexBicycles, + index_bicycles, query="*", - options=ftAggregateOptions, + options=ft_aggregate_options, ) - sortedResult = sorted(result, key=lambda x: (x[b"condition"], x[b"bicycles"])) - expectedResult = sorted( + sorted_result = sorted(result, key=lambda x: (x[b"condition"], x[b"bicycles"])) + expected_result = sorted( [ { b"condition": b"refurbished", @@ -374,75 +720,77 @@ async def test_ft_aggregate_with_bicycles_data( ], key=lambda x: (x[b"condition"], x[b"bicycles"]), ) - assert sortedResult == expectedResult + assert sorted_result == expected_result # Test FT.PROFILE for the above mentioned FT.AGGREGATE query - ftProfileResult = await ft.profile( + ft_profile_result = await ft.profile( glide_client, - indexBicycles, + index_bicycles, FtProfileOptions.from_query_options( - query="*", queryOptions=ftAggregateOptions + query="*", query_options=ft_aggregate_options ), ) - assert len(ftProfileResult) > 0 + assert len(ft_profile_result) > 0 assert ( - sorted(ftProfileResult[0], key=lambda x: (x[b"condition"], x[b"bicycles"])) - == expectedResult + sorted( + ft_profile_result[0], key=lambda x: (x[b"condition"], x[b"bicycles"]) + ) + == expected_result ) - assert await ft.dropindex(glide_client, indexName=indexBicycles) == OK + assert await ft.dropindex(glide_client, index_bicycles) == OK @pytest.mark.parametrize("cluster_mode", [True]) 
@pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_ft_aggregate_with_movies_data( self, glide_client: GlideClusterClient, protocol ): - prefixMovies = "{movies}:" - indexMovies = prefixMovies + str(uuid.uuid4()) + prefix_movies = "{movies}:" + index_movies = prefix_movies + str(uuid.uuid4()) # Create index for movies data. await TestFt._create_index_for_ft_aggregate_with_movies_data( - self=self, - glide_client=glide_client, - index_name=indexMovies, - prefix=prefixMovies, + self, + glide_client, + index_movies, + prefix_movies, ) # Set JSON keys with movies data. await TestFt._create_hash_keys_for_ft_aggregate_with_movies_data( - self=self, glide_client=glide_client, prefix=prefixMovies + self, glide_client, prefix_movies ) # Wait for index to be updated. time.sleep(self.sleep_wait_time) # Run FT.AGGREGATE command with the following arguments: # ['FT.AGGREGATE', '{movies}:5a0e6257-3488-4514-96f2-f4c80f6cb0a9', '*', 'LOAD', '*', 'APPLY', 'ceil(@rating)', 'AS', 'r_rating', 'GROUPBY', '1', '@genre', 'REDUCE', 'COUNT', '0', 'AS', 'nb_of_movies', 'REDUCE', 'SUM', '1', 'votes', 'AS', 'nb_of_votes', 'REDUCE', 'AVG', '1', 'r_rating', 'AS', 'avg_rating', 'SORTBY', '4', '@avg_rating', 'DESC', '@nb_of_votes', 'DESC'] - - ftAggregateOptions: FtAggregateOptions = FtAggregateOptions( + # Testing for bytes type input. 
+ ft_aggregate_options: FtAggregateOptions = FtAggregateOptions( loadAll=True, clauses=[ - FtAggregateApply(expression="ceil(@rating)", name="r_rating"), + FtAggregateApply(expression=b"ceil(@rating)", name=b"r_rating"), FtAggregateGroupBy( - ["@genre"], + [b"@genre"], [ - FtAggregateReducer("COUNT", [], "nb_of_movies"), - FtAggregateReducer("SUM", ["votes"], "nb_of_votes"), - FtAggregateReducer("AVG", ["r_rating"], "avg_rating"), + FtAggregateReducer(b"COUNT", [], b"nb_of_movies"), + FtAggregateReducer(b"SUM", [b"votes"], b"nb_of_votes"), + FtAggregateReducer(b"AVG", [b"r_rating"], b"avg_rating"), ], ), FtAggregateSortBy( properties=[ - FtAggregateSortProperty("@avg_rating", OrderBy.DESC), - FtAggregateSortProperty("@nb_of_votes", OrderBy.DESC), + FtAggregateSortProperty(b"@avg_rating", OrderBy.DESC), + FtAggregateSortProperty(b"@nb_of_votes", OrderBy.DESC), ] ), ], ) result = await ft.aggregate( glide_client, - indexName=indexMovies, - query="*", - options=ftAggregateOptions, + index_name=index_movies.encode("utf-8"), + query=b"*", + options=ft_aggregate_options, ) - sortedResult = sorted( + sorted_result = sorted( result, key=lambda x: ( x[b"genre"], @@ -451,7 +799,7 @@ async def test_ft_aggregate_with_movies_data( x[b"avg_rating"], ), ) - expectedResultSet = sorted( + expected_result = sorted( [ { b"genre": b"Drama", @@ -493,20 +841,20 @@ async def test_ft_aggregate_with_movies_data( x[b"avg_rating"], ), ) - assert expectedResultSet == sortedResult + assert expected_result == sorted_result # Test FT.PROFILE for the above mentioned FT.AGGREGATE query - ftProfileResult = await ft.profile( + ft_profile_result = await ft.profile( glide_client, - indexMovies, + index_movies, FtProfileOptions.from_query_options( - query="*", queryOptions=ftAggregateOptions + query="*", query_options=ft_aggregate_options ), ) - assert len(ftProfileResult) > 0 + assert len(ft_profile_result) > 0 assert ( sorted( - ftProfileResult[0], + ft_profile_result[0], key=lambda x: ( x[b"genre"], 
x[b"nb_of_movies"], @@ -514,10 +862,10 @@ async def test_ft_aggregate_with_movies_data( x[b"avg_rating"], ), ) - == expectedResultSet + == expected_result ) - assert await ft.dropindex(glide_client, indexName=indexMovies) == OK + assert await ft.dropindex(glide_client, index_movies) == OK async def _create_index_for_ft_aggregate_with_bicycles_data( self, glide_client: GlideClusterClient, index_name: TEncodable, prefix @@ -560,11 +908,7 @@ async def _create_json_keys_for_ft_aggregate_with_bicycles_data( glide_client, prefix + "1", ".", - '{"brand": "Bicyk", "model": "Hillcraft", "price": 1200, "description":' - + ' "Kids want to ride with as little weight as possible. Especially on an' - + ' incline! They may be at the age when a 27.5\\" wheel bike is just too clumsy' - + ' coming off a 24\\" bike. The Hillcraft 26 is just the solution they need!",' - + ' "condition": "used"}', + '{"brand": "Bicyk", "model": "Hillcraft", "price": 1200, "condition": "used"}', ) == OK ) @@ -574,12 +918,7 @@ async def _create_json_keys_for_ft_aggregate_with_bicycles_data( glide_client, prefix + "2", ".", - '{"brand": "Nord", "model": "Chook air 5", "price": 815, "description":' - + ' "The Chook Air 5 gives kids aged six years and older a durable and' - + " uberlight mountain bike for their first experience on tracks and easy" - + " cruising through forests and fields. The lower top tube makes it easy to" - + " mount and dismount in any situation, giving your kids greater safety on the" - + ' trails.", "condition": "used"}', + '{"brand": "Nord", "model": "Chook air 5", "price": 815, "condition": "used"}', ) == OK ) @@ -589,14 +928,7 @@ async def _create_json_keys_for_ft_aggregate_with_bicycles_data( glide_client, prefix + "3", ".", - '{"brand": "Eva", "model": "Eva 291", "price": 3400, "description": "The' - + " sister company to Nord, Eva launched in 2005 as the first and only" - + " women-dedicated bicycle brand. 
Designed by women for women, allEva bikes are" - + " optimized for the feminine physique using analytics from a body metrics" - + " database. If you like 29ers, try the Eva 291. It\\u2019s a brand new bike for" - + " 2022.. This full-suspension, cross-country ride has been designed for" - + " velocity. The 291 has 100mm of front and rear travel, a superlight aluminum" - + ' frame and fast-rolling 29-inch wheels. Yippee!", "condition": "used"}', + '{"brand": "Eva", "model": "Eva 291", "price": 3400, "condition": "used"}', ) == OK ) @@ -606,12 +938,7 @@ async def _create_json_keys_for_ft_aggregate_with_bicycles_data( glide_client, prefix + "4", ".", - '{"brand": "Noka Bikes", "model": "Kahuna", "price": 3200, "description":' - + ' "Whether you want to try your hand at XC racing or are looking for a lively' - + " trail bike that's just as inspiring on the climbs as it is over rougher" - + " ground, the Wilder is one heck of a bike built specifically for short women." - + " Both the frames and components have been tweaked to include a women\\u2019s" - + ' saddle, different bars and unique colourway.", "condition": "used"}', + '{"brand": "Noka Bikes", "model": "Kahuna", "price": 3200, "condition": "used"}', ) == OK ) @@ -621,12 +948,7 @@ async def _create_json_keys_for_ft_aggregate_with_bicycles_data( glide_client, prefix + "5", ".", - '{"brand": "Breakout", "model": "XBN 2.1 Alloy", "price": 810,' - + ' "description": "The XBN 2.1 Alloy is our entry-level road bike \\u2013 but' - + " that\\u2019s not to say that it\\u2019s a basic machine. 
With an internal" - + " weld aluminium frame, a full carbon fork, and the slick-shifting Claris gears" - + " from Shimano\\u2019s, this is a bike which doesn\\u2019t break the bank and" - + ' delivers craved performance.", "condition": "new"}', + '{"brand": "Breakout", "model": "XBN 2.1 Alloy", "price": 810, "condition": "new"}', ) == OK ) @@ -636,13 +958,7 @@ async def _create_json_keys_for_ft_aggregate_with_bicycles_data( glide_client, prefix + "6", ".", - '{"brand": "ScramBikes", "model": "WattBike", "price": 2300,' - + ' "description": "The WattBike is the best e-bike for people who still feel' - + " young at heart. It has a Bafang 1000W mid-drive system and a 48V 17.5AH" - + " Samsung Lithium-Ion battery, allowing you to ride for more than 60 miles on" - + " one charge. It\\u2019s great for tackling hilly terrain or if you just fancy" - + " a more leisurely ride. With three working modes, you can choose between" - + ' E-bike, assisted bicycle, and normal bike modes.", "condition": "new"}', + '{"brand": "ScramBikes", "model": "WattBike", "price": 2300, "condition": "new"}', ) == OK ) @@ -652,20 +968,7 @@ async def _create_json_keys_for_ft_aggregate_with_bicycles_data( glide_client, prefix + "7", ".", - '{"brand": "Peaknetic", "model": "Secto", "price": 430, "description":' - + ' "If you struggle with stiff fingers or a kinked neck or back after a few' - " minutes on the road, this lightweight, aluminum bike alleviates those issues" - " and allows you to enjoy the ride. From the ergonomic grips to the" - " lumbar-supporting seat position, the Roll Low-Entry offers incredible" - " comfort. The rear-inclined seat tube facilitates stability by allowing you to" - " put a foot on the ground to balance at a stop, and the low step-over frame" - " makes it accessible for all ability and mobility levels. The saddle is very" - " soft, with a wide back to support your hip joints and a cutout in the center" - " to redistribute that pressure. 
Rim brakes deliver satisfactory braking" - " control, and the wide tires provide a smooth, stable ride on paved roads and" - " gravel. Rack and fender mounts facilitate setting up the Roll Low-Entry as" - " your preferred commuter, and the BMX-like handlebar offers space for mounting" - ' a flashlight, bell, or phone holder.", "condition": "new"}', + '{"brand": "Peaknetic", "model": "Secto", "price": 430, "condition": "new"}', ) == OK ) @@ -675,16 +978,7 @@ async def _create_json_keys_for_ft_aggregate_with_bicycles_data( glide_client, prefix + "8", ".", - '{"brand": "nHill", "model": "Summit", "price": 1200, "description":' - + ' "This budget mountain bike from nHill performs well both on bike paths and' - + " on the trail. The fork with 100mm of travel absorbs rough terrain. Fat Kenda" - + " Booster tires give you grip in corners and on wet trails. The Shimano Tourney" - + " drivetrain offered enough gears for finding a comfortable pace to ride" - + " uphill, and the Tektro hydraulic disc brakes break smoothly. Whether you want" - + " an affordable bike that you can take to work, but also take trail in" - + " mountains on the weekends or you\\u2019re just after a stable, comfortable" - + ' ride for the bike path, the Summit gives a good value for money.",' - + ' "condition": "new"}', + '{"brand": "nHill", "model": "Summit", "price": 1200, "condition": "new"}', ) == OK ) @@ -694,14 +988,7 @@ async def _create_json_keys_for_ft_aggregate_with_bicycles_data( glide_client, prefix + "9", ".", - '{"model": "ThrillCycle", "brand": "BikeShind", "price": 815,' - + ' "description": "An artsy, retro-inspired bicycle that\\u2019s as' - + " functional as it is pretty: The ThrillCycle steel frame offers a smooth ride." - + " A 9-speed drivetrain has enough gears for coasting in the city, but we" - + " wouldn\\u2019t suggest taking it to the mountains. Fenders protect you from" - + " mud, and a rear basket lets you transport groceries, flowers and books. 
The" - + " ThrillCycle comes with a limited lifetime warranty, so this little guy will" - + ' last you long past graduation.", "condition": "refurbished"}', + '{"model": "ThrillCycle", "brand": "BikeShind", "price": 815, "condition": "refurbished"}', ) == OK ) @@ -733,10 +1020,6 @@ async def _create_hash_keys_for_ft_aggregate_with_movies_data( prefix + "11002", { "title": "Star Wars: Episode V - The Empire Strikes Back", - "plot": "After the Rebels are brutally overpowered by the Empire on the ice planet Hoth," - + " Luke Skywalker begins Jedi training with Yoda, while his friends are" - + " pursued by Darth Vader and a bounty hunter named Boba Fett all over the" - + " galaxy.", "release_year": "1980", "genre": "Action", "rating": "8.7", @@ -749,8 +1032,6 @@ async def _create_hash_keys_for_ft_aggregate_with_movies_data( prefix + "11003", { "title": "The Godfather", - "plot": "The aging patriarch of an organized crime dynasty transfers control of his" - + " clandestine empire to his reluctant son.", "release_year": "1972", "genre": "Drama", "rating": "9.2", @@ -763,8 +1044,6 @@ async def _create_hash_keys_for_ft_aggregate_with_movies_data( prefix + "11004", { "title": "Heat", - "plot": "A group of professional bank robbers start to feel the heat from police when they" - + " unknowingly leave a clue at their latest heist.", "release_year": "1995", "genre": "Thriller", "rating": "8.2", @@ -777,7 +1056,6 @@ async def _create_hash_keys_for_ft_aggregate_with_movies_data( prefix + "11005", { "title": "Star Wars: Episode VI - Return of the Jedi", - "plot": "The Rebels dispatch to Endor to destroy the second Empire's Death Star.", "release_year": "1983", "genre": "Action", "rating": "8.3", diff --git a/python/python/tests/tests_server_modules/test_json.py b/python/python/tests/tests_server_modules/test_json.py index a77797af93..b098a80004 100644 --- a/python/python/tests/tests_server_modules/test_json.py +++ b/python/python/tests/tests_server_modules/test_json.py @@ -1712,8 
+1712,8 @@ async def test_json_arrappend(self, glide_client: TGlideClient): initial_json_value = '{"a": 1, "b": ["one", "two"]}' assert await json.set(glide_client, key, "$", initial_json_value) == OK - assert await json.arrappend(glide_client, key, ['"three"'], "$.b") == [3] - assert await json.arrappend(glide_client, key, ['"four"', '"five"'], ".b") == 5 + assert await json.arrappend(glide_client, key, "$.b", ['"three"']) == [3] + assert await json.arrappend(glide_client, key, ".b", ['"four"', '"five"']) == 5 result = await json.get(glide_client, key, "$") assert isinstance(result, bytes) @@ -1721,27 +1721,27 @@ async def test_json_arrappend(self, glide_client: TGlideClient): {"a": 1, "b": ["one", "two", "three", "four", "five"]} ] - assert await json.arrappend(glide_client, key, ['"value"'], "$.a") == [None] + assert await json.arrappend(glide_client, key, "$.a", ['"value"']) == [None] # JSONPath, path doesnt exist - assert await json.arrappend(glide_client, key, ['"value"'], "$.c") == [] + assert await json.arrappend(glide_client, key, "$.c", ['"value"']) == [] # Legacy path, `path` doesnt exist with pytest.raises(RequestError): - await json.arrappend(glide_client, key, ['"value"'], ".c") + await json.arrappend(glide_client, key, ".c", ['"value"']) # Legacy path, the JSON value at `path` is not a array with pytest.raises(RequestError): - await json.arrappend(glide_client, key, ['"value"'], ".a") + await json.arrappend(glide_client, key, ".a", ['"value"']) with pytest.raises(RequestError): - await json.arrappend(glide_client, "non_existing_key", ['"six"'], "$.b") + await json.arrappend(glide_client, "non_existing_key", "$.b", ['"six"']) with pytest.raises(RequestError): - await json.arrappend(glide_client, "non_existing_key", ['"six"'], ".b") + await json.arrappend(glide_client, "non_existing_key", ".b", ['"six"']) # multiple path match json_value = '[[], ["a"], ["a", "b"]]' assert await json.set(glide_client, key, "$", json_value) == OK - assert await 
json.arrappend(glide_client, key, ['"c"'], "[*]") == 1 + assert await json.arrappend(glide_client, key, "[*]", ['"c"']) == 1 result = await json.get(glide_client, key, "$") assert isinstance(result, bytes) assert OuterJson.loads(result) == [[["c"], ["a", "c"], ["a", "b", "c"]]] diff --git a/utils/TestUtils.ts b/utils/TestUtils.ts index f6c493771b..423bf8e9cb 100644 --- a/utils/TestUtils.ts +++ b/utils/TestUtils.ts @@ -22,7 +22,7 @@ function parseOutput(input: string): { .map((address) => address.split(":")) .map((address) => [address[0], Number(address[1])]) as [ string, - number + number, ][]; if (clusterFolder === undefined || ports === undefined) { @@ -43,7 +43,7 @@ export class ValkeyCluster { private constructor( version: string, addresses: [string, number][], - clusterFolder?: string + clusterFolder?: string, ) { this.addresses = addresses; this.clusterFolder = clusterFolder; @@ -56,9 +56,9 @@ export class ValkeyCluster { replicaCount: number, getVersionCallback: ( addresses: [string, number][], - clusterMode: boolean + clusterMode: boolean, ) => Promise, - loadModule?: string[] + loadModule?: string[], ): Promise { return new Promise((resolve, reject) => { let command = `start -r ${replicaCount} -n ${shardCount}`; @@ -70,7 +70,7 @@ export class ValkeyCluster { if (loadModule) { if (loadModule.length === 0) { throw new Error( - "Please provide the path(s) to the module(s) you want to load." 
+ "Please provide the path(s) to the module(s) you want to load.", ); } @@ -94,12 +94,12 @@ export class ValkeyCluster { new ValkeyCluster( ver, addresses, - clusterFolder - ) - ) + clusterFolder, + ), + ), ); } - } + }, ); }); } @@ -109,11 +109,11 @@ export class ValkeyCluster { addresses: [string, number][], getVersionCallback: ( addresses: [string, number][], - clusterMode: boolean - ) => Promise + clusterMode: boolean, + ) => Promise, ): Promise { return getVersionCallback(addresses, cluster_mode).then( - (ver) => new ValkeyCluster(ver, addresses, "") + (ver) => new ValkeyCluster(ver, addresses, ""), ); } diff --git a/utils/package.json b/utils/package.json index 0bbd5c9d5b..6d3100505c 100644 --- a/utils/package.json +++ b/utils/package.json @@ -12,9 +12,9 @@ "author": "", "license": "Apache-2.0", "devDependencies": { - "@types/node": "^20.12.12", + "@types/node": "22.9", "@types/semver": "^7.5.8", - "prettier": "^2.8.8" + "prettier": "^3.3" }, "dependencies": { "child_process": "^1.0.2", diff --git a/utils/release-candidate-testing/node/index.js b/utils/release-candidate-testing/node/index.js index be55f97cc4..450ed0e308 100644 --- a/utils/release-candidate-testing/node/index.js +++ b/utils/release-candidate-testing/node/index.js @@ -4,7 +4,6 @@ import { GlideClient, GlideClusterClient } from "@valkey/valkey-glide"; import { ValkeyCluster } from "../../TestUtils.js"; - async function runCommands(client) { console.log("Executing commands"); // Set a bunch of keys @@ -41,7 +40,9 @@ async function runCommands(client) { // check that the correct number of keys were deleted if (deletedKeysNum !== 3) { console.log(deletedKeysNum); - throw new Error(`Unexpected number of keys deleted, expected 3, got ${deletedKeysNum}`); + throw new Error( + `Unexpected number of keys deleted, expected 3, got ${deletedKeysNum}`, + ); } // check that the keys were deleted for (let i = 1; i <= 3; i++) { @@ -74,7 +75,8 @@ async function clusterTests() { try { console.log("Testing 
cluster"); console.log("Creating cluster"); - let valkeyCluster = await ValkeyCluster.createCluster(true, + let valkeyCluster = await ValkeyCluster.createCluster( + true, 3, 1, getServerVersion, @@ -82,8 +84,12 @@ async function clusterTests() { console.log("Cluster created"); console.log("Connecting to cluster"); - let addresses = valkeyCluster.getAddresses().map((address) => { return { host: address[0], port: address[1] } }); - const client = await GlideClusterClient.createClient({ addresses: addresses }); + let addresses = valkeyCluster.getAddresses().map((address) => { + return { host: address[0], port: address[1] }; + }); + const client = await GlideClusterClient.createClient({ + addresses: addresses, + }); console.log("Connected to cluster"); await runCommands(client); @@ -103,9 +109,10 @@ async function clusterTests() { async function standaloneTests() { try { - console.log("Testing standalone Cluster") + console.log("Testing standalone Cluster"); console.log("Creating Cluster"); - let valkeyCluster = await ValkeyCluster.createCluster(false, + let valkeyCluster = await ValkeyCluster.createCluster( + false, 1, 1, getServerVersion, @@ -113,13 +120,14 @@ async function standaloneTests() { console.log("Cluster created"); console.log("Connecting to Cluster"); - let addresses = valkeyCluster.getAddresses().map((address) => { return { host: address[0], port: address[1] } }); + let addresses = valkeyCluster.getAddresses().map((address) => { + return { host: address[0], port: address[1] }; + }); const client = await GlideClient.createClient({ addresses: addresses }); console.log("Connected to Cluster"); await closeClientAndCluster(client, valkeyCluster); console.log("Done"); - } catch (error) { // Need this part just when running in our self-hosted runner, so if the test fails before closing Clusters we still kill them and clean up if (process.platform === "linux" && process.arch in ["arm", "arm64"]) { @@ -130,7 +138,6 @@ async function standaloneTests() { } } - 
async function main() { await clusterTests(); console.log("Cluster tests passed");