diff --git a/Makefile b/Makefile index 304a2648ba..1b41f60047 100644 --- a/Makefile +++ b/Makefile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -VERSION:=7.2.5 +VERSION:=7.4.0 PROJECT?=redis GH_ORG?=redis SPRING_PROFILE?=ci diff --git a/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java b/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java index 80cc128e55..48949bee68 100644 --- a/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java +++ b/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java @@ -2566,6 +2566,76 @@ public Long hStrLen(byte[] key, byte[] field) { return convertAndReturn(delegate.hStrLen(key, field), Converters.identityConverter()); } + @Override + public List hExpire(byte[] key, long seconds, byte[]... fields) { + return this.delegate.hExpire(key, seconds, fields); + } + + @Override + public List hpExpire(byte[] key, long millis, byte[]... fields) { + return this.delegate.hpExpire(key, millis, fields); + } + + @Override + public List hExpireAt(byte[] key, long unixTime, byte[]... fields) { + return this.delegate.hExpireAt(key, unixTime, fields); + } + + @Override + public List hpExpireAt(byte[] key, long unixTimeInMillis, byte[]... fields) { + return this.delegate.hpExpireAt(key, unixTimeInMillis, fields); + } + + @Override + public List hPersist(byte[] key, byte[]... fields) { + return this.delegate.hPersist(key, fields); + } + + @Override + public List hTtl(byte[] key, byte[]... fields) { + return this.delegate.hTtl(key, fields); + } + + @Override + public List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) { + return this.delegate.hTtl(key, timeUnit, fields); + } + + @Override + public List hExpire(String key, long seconds, String... 
fields) { + return hExpire(serialize(key), seconds, serializeMulti(fields)); + } + + @Override + public List hpExpire(String key, long millis, String... fields) { + return hpExpire(serialize(key), millis, serializeMulti(fields)); + } + + @Override + public List hExpireAt(String key, long unixTime, String... fields) { + return hExpireAt(serialize(key), unixTime, serializeMulti(fields)); + } + + @Override + public List hpExpireAt(String key, long unixTimeInMillis, String... fields) { + return hpExpireAt(serialize(key), unixTimeInMillis, serializeMulti(fields)); + } + + @Override + public List hPersist(String key, String... fields) { + return hPersist(serialize(key), serializeMulti(fields)); + } + + @Override + public List hTtl(String key, String... fields) { + return hTtl(serialize(key), serializeMulti(fields)); + } + + @Override + public List hTtl(String key, TimeUnit timeUnit, String... fields) { + return hTtl(serialize(key), timeUnit, serializeMulti(fields)); + } + @Override public void setClientName(byte[] name) { this.delegate.setClientName(name); diff --git a/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java b/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java index aaeaafe18b..b5583d126c 100644 --- a/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java +++ b/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java @@ -65,6 +65,7 @@ * @author ihaohong * @author Dennis Neufeld * @author Shyngys Sapraliyev + * @author Tihomir Mateev * @since 2.0 */ @Deprecated @@ -1470,6 +1471,55 @@ default Long hStrLen(byte[] key, byte[] field) { return hashCommands().hStrLen(key, field); } + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hExpire(byte[] key, long seconds, byte[]... 
fields) { + return hashCommands().hExpire(key, seconds, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hpExpire(byte[] key, long millis, byte[]... fields) { + return hashCommands().hpExpire(key, millis, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hExpireAt(byte[] key, long unixTime, byte[]... fields) { + return hashCommands().hExpireAt(key, unixTime, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hpExpireAt(byte[] key, long unixTimeInMillis, byte[]... fields) { + return hashCommands().hpExpireAt(key, unixTimeInMillis, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hPersist(byte[] key, byte[]... fields) { + return hashCommands().hPersist(key, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hTtl(byte[] key, byte[]... fields) { + return hashCommands().hTtl(key, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) { + return hashCommands().hTtl(key, timeUnit, fields); + } + // GEO COMMANDS /** @deprecated in favor of {@link RedisConnection#geoCommands()}}. 
*/ diff --git a/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java b/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java index d75c2242a0..94f952eaeb 100644 --- a/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java @@ -19,6 +19,8 @@ import reactor.core.publisher.Mono; import java.nio.ByteBuffer; +import java.time.Duration; +import java.time.Instant; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -44,10 +46,34 @@ * * @author Christoph Strobl * @author Mark Paluch + * @author Tihomir Mateev * @since 2.0 */ public interface ReactiveHashCommands { + /** + * {@link Command} for hash-bound operations. + * + * @author Christoph Strobl + * @author Tihomir Mateev + */ + class KeyFieldsCommand extends KeyCommand { + + private final List fields; + + private KeyFieldsCommand(@Nullable ByteBuffer key, List fields) { + super(key); + this.fields = fields; + } + + /** + * @return never {@literal null}. + */ + public List getFields() { + return fields; + } + } + /** * {@literal HSET} {@link Command}. * @@ -216,15 +242,10 @@ default Mono hMSet(ByteBuffer key, Map fieldVal * @author Christoph Strobl * @see Redis Documentation: HGET */ - class HGetCommand extends KeyCommand { - - private List fields; + class HGetCommand extends KeyFieldsCommand { private HGetCommand(@Nullable ByteBuffer key, List fields) { - - super(key); - - this.fields = fields; + super(key, fields); } /** @@ -263,14 +284,7 @@ public HGetCommand from(ByteBuffer key) { Assert.notNull(key, "Key must not be null"); - return new HGetCommand(key, fields); - } - - /** - * @return never {@literal null}. 
- */ - public List getFields() { - return fields; + return new HGetCommand(key, getFields()); } } @@ -394,15 +408,10 @@ default Mono hExists(ByteBuffer key, ByteBuffer field) { * @author Christoph Strobl * @see Redis Documentation: HDEL */ - class HDelCommand extends KeyCommand { - - private final List fields; + class HDelCommand extends KeyFieldsCommand { private HDelCommand(@Nullable ByteBuffer key, List fields) { - - super(key); - - this.fields = fields; + super(key, fields); } /** @@ -441,14 +450,7 @@ public HDelCommand from(ByteBuffer key) { Assert.notNull(key, "Key must not be null"); - return new HDelCommand(key, fields); - } - - /** - * @return never {@literal null}. - */ - public List getFields() { - return fields; + return new HDelCommand(key, getFields()); } } @@ -842,4 +844,453 @@ default Mono hStrLen(ByteBuffer key, ByteBuffer field) { * @since 2.1 */ Flux> hStrLen(Publisher commands); + + /** + * @author Tihomir Mateev + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + class Expire extends KeyFieldsCommand { + + private final Duration ttl; + + /** + * Creates a new {@link Expire} given a {@code key}, a {@link List} of {@code fields} and a time-to-live + * + * @param key can be {@literal null}. + * @param fields must not be {@literal null}. + * @param ttl the duration of the time to live. + */ + private Expire(@Nullable ByteBuffer key, List fields, Duration ttl) { + + super(key, fields); + this.ttl = ttl; + } + + /** + * Specify the {@code fields} within the hash to set an expiration for. + * + * @param fields must not be {@literal null}. + * @return new instance of {@link Expire}. + */ + public static Expire expire(List fields, Duration ttl) { + + Assert.notNull(fields, "Field must not be null"); + return new Expire(null, fields, ttl); + } + + /** + * Define the {@code key} the hash is stored at. + * + * @param key must not be {@literal null}. + * @return new instance of {@link Expire}. 
+ */ + public Expire from(ByteBuffer key) { + return new Expire(key, getFields(), ttl); + } + + /** + * @return the ttl. + */ + public Duration getTtl() { + return ttl; + } + } + + /** + * Expire a given {@literal field} after a given {@link Duration} of time, measured in milliseconds, has passed. + * + * @param key must not be {@literal null}. + * @param field must not be {@literal null}. + * @param duration must not be {@literal null}. + * @return a {@link Mono} emitting the expiration result - {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + default Mono hExpire(ByteBuffer key, Duration duration, ByteBuffer field) { + Assert.notNull(duration, "Duration must not be null"); + + return hExpire(key, duration, Collections.singletonList(field)).singleOrEmpty(); + } + + /** + * Expire a {@link List} of {@literal field} after a given {@link Duration} of time, measured in milliseconds, has passed. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @param duration must not be {@literal null}. + * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. 
+ * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + default Flux hExpire(ByteBuffer key, Duration duration, List fields) { + Assert.notNull(duration, "Duration must not be null"); + + return hExpire(Flux.just(Expire.expire(fields, duration).from(key))) + .mapNotNull(NumericResponse::getOutput); + } + + /** + * Expire a {@link List} of {@literal field} after a given {@link Duration} of time, measured in milliseconds, has passed. + * + * @param commands must not be {@literal null}. + * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @since 3.5 + * @see Redis Documentation: HEXPIRE + */ + Flux> hExpire(Publisher commands); + + /** + * Expire a given {@literal field} after a given {@link Duration} of time, measured in milliseconds, has passed. + * + * @param key must not be {@literal null}. + * @param field must not be {@literal null}. + * @param duration must not be {@literal null}. + * @return a {@link Mono} emitting the expiration result - {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. 
+ * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + default Mono hpExpire(ByteBuffer key, Duration duration, ByteBuffer field) { + Assert.notNull(duration, "Duration must not be null"); + + return hpExpire(key, duration, Collections.singletonList(field)).singleOrEmpty(); + } + + /** + * Expire a {@link List} of {@literal field} after a given {@link Duration} of time, measured in milliseconds, has passed. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @param duration must not be {@literal null}. + * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + default Flux hpExpire(ByteBuffer key, Duration duration, List fields) { + Assert.notNull(duration, "Duration must not be null"); + + return hpExpire(Flux.just(Expire.expire(fields, duration).from(key))) + .mapNotNull(NumericResponse::getOutput); + } + + /** + * Expire a {@link List} of {@literal field} after a given {@link Duration} of time, measured in milliseconds, has passed. + * + * @param commands must not be {@literal null}. + * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. 
+ * @since 3.5 + * @see Redis Documentation: HEXPIRE + */ + Flux> hpExpire(Publisher commands); + + /** + * @author Tihomir Mateev + * @see Redis Documentation: HEXPIREAT + * @since 3.5 + */ + class ExpireAt extends KeyFieldsCommand { + + private final Instant expireAt; + + /** + * Creates a new {@link ExpireAt} given a {@code key}, a {@link List} of {@literal fields} and a {@link Instant} + * + * @param key can be {@literal null}. + * @param fields must not be {@literal null}. + * @param expireAt the {@link Instant} to expire at. + */ + private ExpireAt(@Nullable ByteBuffer key, List fields, Instant expireAt) { + + super(key, fields); + this.expireAt = expireAt; + } + + /** + * Specify the {@code fields} within the hash to set an expiration for. + * + * @param fields must not be {@literal null}. + * @return new instance of {@link ExpireAt}. + */ + public static ExpireAt expireAt(List fields, Instant expireAt) { + + Assert.notNull(fields, "Fields must not be null"); + return new ExpireAt(null, fields, expireAt); + } + + /** + * Define the {@code key} the hash is stored at. + * + * @param key must not be {@literal null}. + * @return new instance of {@link ExpireAt}. + */ + public ExpireAt from(ByteBuffer key) { + return new ExpireAt(key, getFields(), expireAt); + } + + /** + * @return the ttl. + */ + public Instant getExpireAt() { + return expireAt; + } + } + + /** + * Expire a given {@literal field} in a given {@link Instant} of time, indicated as an absolute + * Unix timestamp in seconds since Unix epoch + * + * @param key must not be {@literal null}. + * @param field must not be {@literal null}. + * @param expireAt must not be {@literal null}. 
+ * @return a {@link Mono} emitting the expiration result - {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not + * met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HEXPIREAT + * @since 3.5 + */ + default Mono hExpireAt(ByteBuffer key, Instant expireAt, ByteBuffer field) { + + Assert.notNull(expireAt, "Duration must not be null"); + return hExpireAt(key, expireAt, Collections.singletonList(field)).singleOrEmpty(); + } + + /** + * Expire a {@link List} of {@literal field} in a given {@link Instant} of time, indicated as an absolute + * Unix timestamp in seconds since Unix epoch + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @param expireAt must not be {@literal null}. + * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not + * met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. 
+ * @see Redis Documentation: HEXPIREAT + * @since 3.5 + */ + default Flux hExpireAt(ByteBuffer key, Instant expireAt, List fields) { + Assert.notNull(expireAt, "Duration must not be null"); + + return hExpireAt(Flux.just(ExpireAt.expireAt(fields, expireAt).from(key))).mapNotNull(NumericResponse::getOutput); + } + + /** + * Expire a {@link List} of {@literal field} in a given {@link Instant} of time, indicated as an absolute + * Unix timestamp in seconds since Unix epoch + * + * @param commands must not be {@literal null}. + * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not + * met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @since 3.5 + * @see Redis Documentation: HEXPIREAT + */ + Flux> hExpireAt(Publisher commands); + + /** + * Expire a given {@literal field} in a given {@link Instant} of time, indicated as an absolute + * Unix timestamp in milliseconds since Unix epoch + * + * @param key must not be {@literal null}. + * @param field must not be {@literal null}. + * @param expireAt must not be {@literal null}. + * @return a {@link Mono} emitting the expiration result - {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not + * met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. 
+ * @see Redis Documentation: HPEXPIREAT + * @since 3.5 + */ + default Mono hpExpireAt(ByteBuffer key, Instant expireAt, ByteBuffer field) { + + Assert.notNull(expireAt, "Duration must not be null"); + return hpExpireAt(key, expireAt, Collections.singletonList(field)).singleOrEmpty(); + } + + /** + * Expire a {@link List} of {@literal field} in a given {@link Instant} of time, indicated as an absolute + * Unix timestamp in milliseconds since Unix epoch + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @param expireAt must not be {@literal null}. + * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not + * met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HPEXPIREAT + * @since 3.5 + */ + default Flux hpExpireAt(ByteBuffer key, Instant expireAt, List fields) { + Assert.notNull(expireAt, "Duration must not be null"); + + return hpExpireAt(Flux.just(ExpireAt.expireAt(fields, expireAt).from(key))).mapNotNull(NumericResponse::getOutput); + } + + /** + * Expire a {@link List} of {@literal field} in a given {@link Instant} of time, indicated as an absolute + * Unix timestamp in milliseconds since Unix epoch + * + * @param commands must not be {@literal null}. 
+ * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not + * met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @since 3.5 + * @see Redis Documentation: HPEXPIREAT + */ + Flux> hpExpireAt(Publisher commands); + + /** + * Persist a given {@literal field} removing any associated expiration, measured as absolute + * Unix timestamp in seconds since Unix epoch + * + * @param key must not be {@literal null}. + * @param field must not be {@literal null}. + * @return a {@link Mono} emitting the persist result - {@code 1} indicating expiration time is removed; + * {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such field; + * {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HPERSIST + * @since 3.5 + */ + default Mono hPersist(ByteBuffer key, ByteBuffer field) { + + return hPersist(key, Collections.singletonList(field)).singleOrEmpty(); + } + + /** + * Persist a given {@link List} of {@literal field} removing any associated expiration. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a {@link Flux} emitting the persisting results one by one - {@code 1} indicating expiration time is removed; + * {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such field; + * {@literal null} when used in pipeline / transaction. 
+ * @see Redis Documentation: HPERSIST + * @since 3.5 + */ + default Flux hPersist(ByteBuffer key, List fields) { + + return hPersist(Flux.just(new KeyFieldsCommand(key, fields))).mapNotNull(NumericResponse::getOutput); + } + + /** + * Persist a given {@link List} of {@literal field} removing any associated expiration. + * + * @param commands must not be {@literal null}. + * @return a {@link Flux} emitting the persisting results one by one - {@code 1} indicating expiration time is removed; + * {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such field; + * {@literal null} when used in pipeline / transaction. + * @since 3.5 + * @see Redis Documentation: HPERSIST + */ + Flux> hPersist(Publisher commands); + + /** + * Returns the time-to-live of a given {@literal field} in seconds. + * + * @param key must not be {@literal null}. + * @param field must not be {@literal null}. + * @return a {@link Mono} emitting the TTL result - the time to live in seconds; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. + * The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HTTL + * @since 3.5 + */ + default Mono hTtl(ByteBuffer key, ByteBuffer field) { + + return hTtl(key, Collections.singletonList(field)).singleOrEmpty(); + } + + /** + * Returns the time-to-live of all the given {@literal field} in the {@link List} in seconds. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a {@link Flux} emitting the TTL results one by one - the time to live in seconds; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. + * The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. 
+ * @see Redis Documentation: HTTL + * @since 3.5 + */ + default Flux hTtl(ByteBuffer key, List fields) { + + return hTtl(Flux.just(new KeyFieldsCommand(key, fields))).mapNotNull(NumericResponse::getOutput); + } + + /** + * Returns the time-to-live of all the given {@literal field} in the {@link List} in seconds. + * + * @param commands must not be {@literal null}. + * @return a {@link Flux} emitting the persisting results one by one - the time to live in seconds; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. + * The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @since 3.5 + * @see Redis Documentation: HTTL + */ + Flux> hTtl(Publisher commands); + + + /** + * Returns the time-to-live of a given {@literal field} in milliseconds. + * + * @param key must not be {@literal null}. + * @param field must not be {@literal null}. + * @return a {@link Mono} emitting the TTL result - the time to live in milliseconds; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. + * The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HPTTL + * @since 3.5 + */ + default Mono hpTtl(ByteBuffer key, ByteBuffer field) { + + return hpTtl(key, Collections.singletonList(field)).singleOrEmpty(); + } + + /** + * Returns the time-to-live of all the given {@literal field} in the {@link List} in milliseconds. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a {@link Flux} emitting the TTL results one by one - the time to live in milliseconds; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. 
+ * The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HPTTL + * @since 3.5 + */ + default Flux hpTtl(ByteBuffer key, List fields) { + + return hpTtl(Flux.just(new KeyFieldsCommand(key, fields))).mapNotNull(NumericResponse::getOutput); + } + + /** + * Returns the time-to-live of all the given {@literal field} in the {@link List} in milliseconds. + * + * @param commands must not be {@literal null}. + * @return a {@link Flux} emitting the persisting results one by one - the time to live in milliseconds; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. + * The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @since 3.5 + * @see Redis Documentation: HPTTL + */ + Flux> hpTtl(Publisher commands); } diff --git a/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java b/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java index d1587a7918..d2eba62b52 100644 --- a/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java @@ -18,6 +18,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.TimeUnit; import org.springframework.data.redis.core.Cursor; import org.springframework.data.redis.core.ScanOptions; @@ -29,6 +30,7 @@ * @author Costin Leau * @author Christoph Strobl * @author Mark Paluch + * @author Tihomir Mateev */ public interface RedisHashCommands { @@ -249,4 +251,111 @@ public interface RedisHashCommands { */ @Nullable Long hStrLen(byte[] key, byte[] field); + + /** + * Set time to live for given {@code field} in seconds. + * + * @param key must not be {@literal null}. 
+ * @param seconds the amount of time after which the key will be expired in seconds, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HEXPIRE + * @since 3.4 + */ + @Nullable + List hExpire(byte[] key, long seconds, byte[]... fields); + + /** + * Set time to live for given {@code field} in milliseconds. + * + * @param key must not be {@literal null}. + * @param millis the amount of time after which the key will be expired in milliseconds, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HPEXPIRE + * @since 3.4 + */ + @Nullable + List hpExpire(byte[] key, long millis, byte[]... fields); + + /** + * Set the expiration for given {@code field} as a {@literal UNIX} timestamp. + * + * @param key must not be {@literal null}. + * @param unixTime the moment in time in which the field expires, must not be {@literal null}. + * @param fields must not be {@literal null}. 
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not + * met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HEXPIREAT + * @since 3.4 + */ + @Nullable + List hExpireAt(byte[] key, long unixTime, byte[]... fields); + + /** + * Set the expiration for given {@code field} as a {@literal UNIX} timestamp in milliseconds. + * + * @param key must not be {@literal null}. + * @param unixTimeInMillis the moment in time in which the field expires in milliseconds, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not + * met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HPEXPIREAT + * @since 3.4 + */ + @Nullable + List hpExpireAt(byte[] key, long unixTimeInMillis, byte[]... fields); + + /** + * Remove the expiration from given {@code field}. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. 
+ * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is removed; + * {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such field; + * {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HPERSIST + * @since 3.4 + */ + @Nullable + List hPersist(byte[] key, byte[]... fields); + + /** + * Get the time to live for {@code field} in seconds. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. + * The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HTTL + * @since 3.4 + */ + @Nullable + List hTtl(byte[] key, byte[]... fields); + + /** + * Get the time to live for {@code field} and convert it to the given {@link TimeUnit}. + * + * @param key must not be {@literal null}. + * @param timeUnit must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return for each of the fields supplied - the time to live in the {@link TimeUnit} provided; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. + * The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HTTL + * @since 3.4 + */ + @Nullable + List hTtl(byte[] key, TimeUnit timeUnit, byte[]... 
fields); } diff --git a/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java b/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java index e198eecfd3..5fa06eb19b 100644 --- a/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java +++ b/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java @@ -2333,6 +2333,113 @@ Long zRangeStoreRevByScore(String dstKey, String srcKey, @Nullable Long hStrLen(String key, String field); + /** + * Set time to live for given {@code field} in seconds. + * + * @param key must not be {@literal null}. + * @param seconds the amount of time after which the key will be expired in seconds, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HEXPIRE + * @since 3.4 + */ + @Nullable + List hExpire(String key, long seconds, String... fields); + + /** + * Set time to live for given {@code field} in milliseconds. + * + * @param key must not be {@literal null}. + * @param millis the amount of time after which the key will be expired in milliseconds, must not be {@literal null}. + * @param fields must not be {@literal null}. 
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HPEXPIRE + * @since 3.4 + */ + @Nullable + List hpExpire(String key, long millis, String... fields); + + /** + * Set the expiration for given {@code field} as a {@literal UNIX} timestamp. + * + * @param key must not be {@literal null}. + * @param unixTime the moment in time in which the field expires, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not + * met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HEXPIREAT + * @since 3.4 + */ + @Nullable + List hExpireAt(String key, long unixTime, String... fields); + + /** + * Set the expiration for given {@code field} as a {@literal UNIX} timestamp in milliseconds. + * + * @param key must not be {@literal null}. + * @param unixTimeInMillis the moment in time in which the field expires in milliseconds, must not be {@literal null}. + * @param fields must not be {@literal null}. 
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not + * met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HPEXPIREAT + * @since 3.4 + */ + @Nullable + List hpExpireAt(String key, long unixTimeInMillis, String... fields); + + /** + * Remove the expiration from given {@code field}. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is removed; + * {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such field; + * {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HPERSIST + * @since 3.4 + */ + @Nullable + List hPersist(String key, String... fields); + + /** + * Get the time to live for {@code field} in seconds. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. + * The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HTTL + * @since 3.4 + */ + @Nullable + List hTtl(String key, String... fields); + + /** + * Get the time to live for {@code field} and convert it to the given {@link TimeUnit}. + * + * @param key must not be {@literal null}. 
+ * @param timeUnit must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: the time to live in the {@link TimeUnit} provided; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. + * The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HTTL + * @since 3.4 + */ + @Nullable + List hTtl(String key, TimeUnit timeUnit, String... fields); + // ------------------------------------------------------------------------- // Methods dealing with HyperLogLog // ------------------------------------------------------------------------- diff --git a/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java b/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java index 1803da058a..747293a9cb 100644 --- a/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java @@ -23,6 +23,7 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Set; +import java.util.concurrent.TimeUnit; import org.springframework.dao.DataAccessException; import org.springframework.data.redis.connection.RedisHashCommands; @@ -39,6 +40,7 @@ * @author Christoph Strobl * @author Mark Paluch * @author John Blum + * @author Tihomir Mateev * @since 2.0 */ class JedisClusterHashCommands implements RedisHashCommands { @@ -287,6 +289,92 @@ protected ScanIteration> doScan(CursorId cursorId, ScanOpt }.open(); } + @Override + public List hExpire(byte[] key, long seconds, byte[]... 
fields) { + Assert.notNull(key, "Key must not be null"); + Assert.notNull(fields, "Fields must not be null"); + + try { + return connection.getCluster().hexpire(key, seconds, fields); + } catch (Exception ex) { + throw convertJedisAccessException(ex); + } + } + + @Override + public List hpExpire(byte[] key, long millis, byte[]... fields) { + Assert.notNull(key, "Key must not be null"); + Assert.notNull(fields, "Fields must not be null"); + + try { + return connection.getCluster().hpexpire(key, millis, fields); + } catch (Exception ex) { + throw convertJedisAccessException(ex); + } + } + + @Override + public List hExpireAt(byte[] key, long unixTime, byte[]... fields) { + Assert.notNull(key, "Key must not be null"); + Assert.notNull(fields, "Fields must not be null"); + + try { + return connection.getCluster().hexpireAt(key, unixTime, fields); + } catch (Exception ex) { + throw convertJedisAccessException(ex); + } + } + + @Override + public List hpExpireAt(byte[] key, long unixTimeInMillis, byte[]... fields) { + Assert.notNull(key, "Key must not be null"); + Assert.notNull(fields, "Fields must not be null"); + + try { + return connection.getCluster().hpexpireAt(key, unixTimeInMillis, fields); + } catch (Exception ex) { + throw convertJedisAccessException(ex); + } + } + + @Override + public List hPersist(byte[] key, byte[]... fields) { + Assert.notNull(key, "Key must not be null"); + Assert.notNull(fields, "Fields must not be null"); + + try { + return connection.getCluster().hpersist(key, fields); + } catch (Exception ex) { + throw convertJedisAccessException(ex); + } + } + + @Override + public List hTtl(byte[] key, byte[]... fields) { + Assert.notNull(key, "Key must not be null"); + Assert.notNull(fields, "Fields must not be null"); + + try { + return connection.getCluster().httl(key, fields); + } catch (Exception ex) { + throw convertJedisAccessException(ex); + } + } + + @Override + public List hTtl(byte[] key, TimeUnit timeUnit, byte[]... 
fields) { + Assert.notNull(key, "Key must not be null"); + Assert.notNull(fields, "Fields must not be null"); + + try { + return connection.getCluster().httl(key, fields).stream() + .map(it -> it != null ? timeUnit.convert(it, TimeUnit.SECONDS) : null) + .toList(); + } catch (Exception ex) { + throw convertJedisAccessException(ex); + } + } + @Nullable @Override public Long hStrLen(byte[] key, byte[] field) { diff --git a/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java b/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java index be2cf8bb90..740e13b71e 100644 --- a/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java @@ -25,6 +25,7 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Set; +import java.util.concurrent.TimeUnit; import org.springframework.dao.InvalidDataAccessApiUsageException; import org.springframework.data.redis.connection.RedisHashCommands; @@ -43,6 +44,7 @@ * @author Christoph Strobl * @author Mark Paluch * @author John Blum + * @author Tihomir Mateev * @since 2.0 */ class JedisHashCommands implements RedisHashCommands { @@ -250,6 +252,42 @@ protected void doClose() { }.open(); } + @Override + public List hExpire(byte[] key, long seconds, byte[]... fields) { + return connection.invoke().just(Jedis::hexpire, PipelineBinaryCommands::hexpire, key, seconds, fields); + } + + @Override + public List hpExpire(byte[] key, long millis, byte[]... fields) { + return connection.invoke().just(Jedis::hpexpire, PipelineBinaryCommands::hpexpire, key, millis, fields); + } + + @Override + public List hExpireAt(byte[] key, long unixTime, byte[]... fields) { + return connection.invoke().just(Jedis::hexpireAt, PipelineBinaryCommands::hexpireAt, key, unixTime, fields); + } + + @Override + public List hpExpireAt(byte[] key, long unixTimeInMillis, byte[]... 
fields) { + return connection.invoke().just(Jedis::hpexpireAt, PipelineBinaryCommands::hpexpireAt, key, unixTimeInMillis, fields); + } + + @Override + public List hPersist(byte[] key, byte[]... fields) { + return connection.invoke().just(Jedis::hpersist, PipelineBinaryCommands::hpersist, key, fields); + } + + @Override + public List hTtl(byte[] key, byte[]... fields) { + return connection.invoke().just(Jedis::httl, PipelineBinaryCommands::httl, key, fields); + } + + @Override + public List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) { + return connection.invoke().fromMany(Jedis::httl, PipelineBinaryCommands::httl, key, fields) + .toList(Converters.secondsToTimeUnit(timeUnit)); + } + @Nullable @Override public Long hStrLen(byte[] key, byte[] field) { diff --git a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java index 5125a82fb6..59e6ed1b96 100644 --- a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java @@ -24,6 +24,7 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Set; +import java.util.concurrent.TimeUnit; import org.springframework.dao.InvalidDataAccessApiUsageException; import org.springframework.data.redis.connection.RedisHashCommands; @@ -39,6 +40,7 @@ /** * @author Christoph Strobl * @author Mark Paluch + * @author Tihomir Mateev * @since 2.0 */ class LettuceHashCommands implements RedisHashCommands { @@ -208,6 +210,41 @@ public Cursor> hScan(byte[] key, ScanOptions options) { return hScan(key, CursorId.initial(), options); } + @Override + public List hExpire(byte[] key, long seconds, byte[]... 
fields) { + return connection.invoke().fromMany(RedisHashAsyncCommands::hexpire, key, seconds, fields).toList(); + } + + @Override + public List hpExpire(byte[] key, long millis, byte[]... fields) { + return connection.invoke().fromMany(RedisHashAsyncCommands::hpexpire, key, millis, fields).toList(); + } + + @Override + public List hExpireAt(byte[] key, long unixTime, byte[]... fields) { + return connection.invoke().fromMany(RedisHashAsyncCommands::hexpireat, key, unixTime, fields).toList(); + } + + @Override + public List hpExpireAt(byte[] key, long unixTimeInMillis, byte[]... fields) { + return connection.invoke().fromMany(RedisHashAsyncCommands::hpexpireat, key, unixTimeInMillis, fields).toList(); + } + + @Override + public List hPersist(byte[] key, byte[]... fields) { + return connection.invoke().fromMany(RedisHashAsyncCommands::hpersist, key, fields).toList(); + } + + @Override + public List hTtl(byte[] key, byte[]... fields) { + return connection.invoke().fromMany(RedisHashAsyncCommands::httl, key, fields).toList(); + } + + @Override + public List hTtl(byte[] key, TimeUnit timeUnit, byte[]... 
fields) { + return connection.invoke().fromMany(RedisHashAsyncCommands::httl, key, fields) + .toList(Converters.secondsToTimeUnit(timeUnit)); + } /** * @param key diff --git a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommands.java b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommands.java index 0837489840..ca6bb44418 100644 --- a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommands.java @@ -264,6 +264,90 @@ public Flux> hStrLen(Publisher> hExpire(Publisher commands) { + return connection.execute(cmd -> Flux.from(commands).concatMap(command -> { + + Assert.notNull(command.getKey(), "Key must not be null"); + Assert.notNull(command.getFields(), "Fields must not be null"); + + return cmd.hexpire(command.getKey(), command.getTtl().toSeconds(), command.getFields().toArray(ByteBuffer[]::new)) + .map(value -> new NumericResponse<>(command, value)); + })); + } + + @Override + public Flux> hpExpire(Publisher commands) { + return connection.execute(cmd -> Flux.from(commands).concatMap(command -> { + + Assert.notNull(command.getKey(), "Key must not be null"); + Assert.notNull(command.getFields(), "Fields must not be null"); + + return cmd.hpexpire(command.getKey(), command.getTtl().toMillis(), command.getFields().toArray(ByteBuffer[]::new)) + .map(value -> new NumericResponse<>(command, value)); + })); + } + + @Override + public Flux> hExpireAt(Publisher commands) { + return connection.execute(cmd -> Flux.from(commands).concatMap(command -> { + + Assert.notNull(command.getKey(), "Key must not be null"); + Assert.notNull(command.getFields(), "Fields must not be null"); + + return cmd.hexpireat(command.getKey(), command.getExpireAt().getEpochSecond(), command.getFields().toArray(ByteBuffer[]::new)) + .map(value -> new NumericResponse<>(command, value)); + 
})); + } + + @Override + public Flux> hpExpireAt(Publisher commands) { + return connection.execute(cmd -> Flux.from(commands).concatMap(command -> { + + Assert.notNull(command.getKey(), "Key must not be null"); + Assert.notNull(command.getFields(), "Fields must not be null"); + + return cmd.hpexpireat(command.getKey(), command.getExpireAt().toEpochMilli(), command.getFields().toArray(ByteBuffer[]::new)) + .map(value -> new NumericResponse<>(command, value)); + })); + } + + @Override + public Flux> hPersist(Publisher commands) { + return connection.execute(cmd -> Flux.from(commands).concatMap(command -> { + + Assert.notNull(command.getKey(), "Key must not be null"); + Assert.notNull(command.getFields(), "Fields must not be null"); + + return cmd.hpersist(command.getKey(), command.getFields().toArray(ByteBuffer[]::new)) + .map(value -> new NumericResponse<>(command, value)); + })); + } + + @Override + public Flux> hTtl(Publisher commands) { + return connection.execute(cmd -> Flux.from(commands).concatMap(command -> { + + Assert.notNull(command.getKey(), "Key must not be null"); + Assert.notNull(command.getFields(), "Fields must not be null"); + + return cmd.httl(command.getKey(), command.getFields().toArray(ByteBuffer[]::new)) + .map(value -> new NumericResponse<>(command, value)); + })); + } + + @Override + public Flux> hpTtl(Publisher commands) { + return connection.execute(cmd -> Flux.from(commands).concatMap(command -> { + + Assert.notNull(command.getKey(), "Key must not be null"); + Assert.notNull(command.getFields(), "Fields must not be null"); + + return cmd.hpttl(command.getKey(), command.getFields().toArray(ByteBuffer[]::new)) + .map(value -> new NumericResponse<>(command, value)); + })); + } + private static Map.Entry toEntry(KeyValue kv) { return new Entry() { diff --git a/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java b/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java index 90e77b9ae5..19a8e4164f 
100644 --- a/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java @@ -15,10 +15,13 @@ */ package org.springframework.data.redis.core; +import java.time.Duration; +import java.time.Instant; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.TimeUnit; import org.springframework.lang.Nullable; @@ -29,6 +32,7 @@ * @author Christoph Strobl * @author Ninad Divadkar * @author Mark Paluch + * @author Tihomir Mateev */ public interface BoundHashOperations extends BoundKeyOperations { @@ -153,6 +157,78 @@ public interface BoundHashOperations extends BoundKeyOperations { @Nullable Long lengthOfValue(HK hashKey); + /** + * Set time to live for given {@code hashKey} (aka field). + * + * @param timeout the amount of time after which the key will be expired, must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); {@code -2} + * indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @throws IllegalArgumentException if the timeout is {@literal null}. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + @Nullable + List expire(Duration timeout, Collection hashKeys); + + /** + * Set the expiration for given {@code hashKey} (aka field) as a {@literal date} timestamp. + * + * @param expireAt must not be {@literal null}. + * @param hashKeys must not be {@literal null}. 
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @throws IllegalArgumentException if the instant is {@literal null} or too large to represent as a {@code Date}. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + @Nullable + List expireAt(Instant expireAt, Collection hashKeys); + + /** + * Remove the expiration from given {@code hashKey} (aka field). + * + * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is removed; + * {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such field; {@literal null} when + * used in pipeline / transaction. + * @see Redis Documentation: HPERSIST + * @since 3.5 + */ + @Nullable + List persist(Collection hashKeys); + + /** + * Get the time to live for {@code hashKey} (aka field) in seconds. + * + * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. The command + * returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HTTL + * @since 3.5 + */ + @Nullable + List getExpire(Collection hashKeys); + + /** + * Get the time to live for {@code hashKey} (aka field) and convert it to the given {@link TimeUnit}. + * + * @param timeUnit must not be {@literal null}. 
+ * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: the time to live in the {@link TimeUnit} provided; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. The command + * returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HTTL + * @since 3.5 + */ + @Nullable + List getExpire(TimeUnit timeUnit, Collection hashKeys); + /** * Get size of hash at the bound key. * diff --git a/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java b/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java index 1bfb9c3467..2d79bd7f99 100644 --- a/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java @@ -15,6 +15,8 @@ */ package org.springframework.data.redis.core; +import java.time.Duration; +import java.time.Instant; import java.util.Collection; import java.util.Collections; import java.util.LinkedHashMap; @@ -22,6 +24,7 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Set; +import java.util.concurrent.TimeUnit; import org.springframework.core.convert.converter.Converter; import org.springframework.data.redis.connection.convert.Converters; @@ -34,6 +37,7 @@ * @author Costin Leau * @author Christoph Strobl * @author Ninad Divadkar + * @author Tihomir Mateev */ class DefaultHashOperations extends AbstractOperations implements HashOperations { @@ -210,6 +214,47 @@ public Boolean putIfAbsent(K key, HK hashKey, HV value) { return execute(connection -> connection.hSetNX(rawKey, rawHashKey, rawHashValue)); } + @Override + public List expire(K key, Duration duration, Collection hashKeys) { + byte[] rawKey = rawKey(key); + byte[][] rawHashKeys = rawHashKeys(hashKeys.toArray()); + long rawTimeout = duration.toMillis(); + + 
return execute(connection -> connection.hpExpire(rawKey, rawTimeout, rawHashKeys)); + } + + @Override + public List expireAt(K key, Instant instant, Collection hashKeys) { + byte[] rawKey = rawKey(key); + byte[][] rawHashKeys = rawHashKeys(hashKeys.toArray()); + + return execute(connection -> connection.hpExpireAt(rawKey, instant.toEpochMilli(), rawHashKeys)); + } + + @Override + public List persist(K key, Collection hashKeys) { + byte[] rawKey = rawKey(key); + byte[][] rawHashKeys = rawHashKeys(hashKeys.toArray()); + + return execute(connection -> connection.hPersist(rawKey, rawHashKeys)); + } + + @Override + public List getExpire(K key, Collection hashKeys) { + byte[] rawKey = rawKey(key); + byte[][] rawHashKeys = rawHashKeys(hashKeys.toArray()); + + return execute(connection -> connection.hTtl(rawKey, rawHashKeys)); + } + + @Override + public List getExpire(K key, TimeUnit timeUnit, Collection hashKeys) { + byte[] rawKey = rawKey(key); + byte[][] rawHashKeys = rawHashKeys(hashKeys.toArray()); + + return execute(connection -> connection.hTtl(rawKey, timeUnit, rawHashKeys)); + } + @Override public List values(K key) { diff --git a/src/main/java/org/springframework/data/redis/core/HashOperations.java b/src/main/java/org/springframework/data/redis/core/HashOperations.java index db97cdb10a..be3871b58b 100644 --- a/src/main/java/org/springframework/data/redis/core/HashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/HashOperations.java @@ -15,10 +15,13 @@ */ package org.springframework.data.redis.core; +import java.time.Duration; +import java.time.Instant; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.TimeUnit; import org.springframework.lang.Nullable; @@ -28,6 +31,7 @@ * @author Costin Leau * @author Christoph Strobl * @author Ninad Divadkar + * @author Tihomir Mateev */ public interface HashOperations { @@ -221,6 +225,82 @@ public interface HashOperations { */ 
Cursor> scan(H key, ScanOptions options); + /** + * Set time to live for given {@code hashKey} (aka field). + * + * @param key must not be {@literal null}. + * @param timeout the amount of time after which the key will be expired, must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); {@code -2} + * indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @throws IllegalArgumentException if the timeout is {@literal null}. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + @Nullable + List expire(H key, Duration timeout, Collection hashKeys); + + /** + * Set the expiration for given {@code hashKey} (aka field) as a {@literal date} timestamp. + * + * @param key must not be {@literal null}. + * @param expireAt must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is deleted + * already due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; {@literal null} when used in pipeline / transaction. + * @throws IllegalArgumentException if the instant is {@literal null} or too large to represent as a {@code Date}. 
+ * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + @Nullable + List expireAt(H key, Instant expireAt, Collection hashKeys); + + /** + * Remove the expiration from given {@code hashKey} (aka field). + * + * @param key must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is removed; + * {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such field; {@literal null} when + * used in pipeline / transaction. + * @see Redis Documentation: HPERSIST + * @since 3.5 + */ + @Nullable + List persist(H key, Collection hashKeys); + + /** + * Get the time to live for {@code hashKey} (aka field) in seconds. + * + * @param key must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. The command + * returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HTTL + * @since 3.5 + */ + @Nullable + List getExpire(H key, Collection hashKeys); + + /** + * Get the time to live for {@code hashKey} (aka field) and convert it to the given {@link TimeUnit}. + * + * @param key must not be {@literal null}. + * @param timeUnit must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: the time to live in the {@link TimeUnit} provided; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. The command + * returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / transaction. 
+ * @see Redis Documentation: HTTL + * @since 3.5 + */ + @Nullable + List getExpire(H key, TimeUnit timeUnit, Collection hashKeys); /** * @return never {@literal null}. */ diff --git a/src/test/java/org/springframework/data/redis/connection/AbstractConnectionIntegrationTests.java b/src/test/java/org/springframework/data/redis/connection/AbstractConnectionIntegrationTests.java index b6b9cd6ed4..312c303d24 100644 --- a/src/test/java/org/springframework/data/redis/connection/AbstractConnectionIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/connection/AbstractConnectionIntegrationTests.java @@ -113,6 +113,7 @@ * @author Hendrik Duerkop * @author Shyngys Sapraliyev * @author Roman Osadchuk + * @author Tihomir Mateev */ public abstract class AbstractConnectionIntegrationTests { @@ -3432,6 +3433,194 @@ void hStrLenReturnsZeroWhenKeyDoesNotExist() { verifyResults(Arrays.asList(new Object[] { 0L })); } + @Test + @EnabledOnCommand("HEXPIRE") + public void hExpireReturnsSuccessAndSetsTTL() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hExpire("hash-hexpire", 5L, "key-2")); + actual.add(connection.hTtl("hash-hexpire", "key-2")); + + List results = getResults(); + assertThat(results.get(0)).isEqualTo(Boolean.TRUE); + assertThat((List) results.get(1)).contains(1L); + assertThat((List) results.get(2)).allSatisfy( value -> assertThat((Long)value).isBetween(0L, 5L)); + } + + @Test + @EnabledOnCommand("HEXPIRE") + public void hExpireReturnsMinusTwoWhenFieldDoesNotExist() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hExpire("hash-hexpire", 5L, "missking-field")); + actual.add(connection.hExpire("missing-key", 5L, "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(-2L), List.of(-2L))); + } + + @Test + @EnabledOnCommand("HEXPIRE") + public void hExpireReturnsTwoWhenZeroProvided() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + 
actual.add(connection.hExpire("hash-hexpire", 0, "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(2L))); + } + + @Test + @EnabledOnCommand("HPEXPIRE") + public void hpExpireReturnsSuccessAndSetsTTL() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hpExpire("hash-hexpire", 5000L, "key-2")); + actual.add(connection.hTtl("hash-hexpire", TimeUnit.MILLISECONDS,"key-2")); + + List results = getResults(); + assertThat(results.get(0)).isEqualTo(Boolean.TRUE); + assertThat((List) results.get(1)).contains(1L); + assertThat((List) results.get(2)).allSatisfy( value -> assertThat((Long)value).isBetween(0L, 5000L)); + } + + @Test + @EnabledOnCommand("HPEXPIRE") + public void hpExpireReturnsMinusTwoWhenFieldDoesNotExist() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hpExpire("hash-hexpire", 5L, "missing-field")); + actual.add(connection.hpExpire("missing-key", 5L, "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(-2L), List.of(-2L))); + } + + @Test + @EnabledOnCommand("HPEXPIRE") + public void hpExpireReturnsTwoWhenZeroProvided() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hpExpire("hash-hexpire", 0, "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(2L))); + } + + @Test + @EnabledOnCommand("HEXPIREAT") + public void hExpireAtReturnsSuccessAndSetsTTL() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); + + actual.add(connection.hExpireAt("hash-hexpire", inFiveSeconds, "key-2")); + actual.add(connection.hTtl("hash-hexpire", "key-2")); + + List results = getResults(); + assertThat(results.get(0)).isEqualTo(Boolean.TRUE); + assertThat((List) results.get(1)).contains(1L); + assertThat((List) results.get(2)).allSatisfy( value -> assertThat((Long)value).isBetween(0L, 5L)); + } + + @Test + 
@EnabledOnCommand("HEXPIREAT") + public void hExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); + + actual.add(connection.hExpireAt("hash-hexpire", inFiveSeconds, "missing-field")); + actual.add(connection.hExpireAt("missing-key", inFiveSeconds, "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(-2L), List.of(-2L))); + } + + @Test + @EnabledOnCommand("HEXPIREAT") + public void hExpireAtReturnsTwoWhenZeroProvided() { + long fiveSecondsAgo = Instant.now().minusSeconds(5L).getEpochSecond(); + + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hExpireAt("hash-hexpire", fiveSecondsAgo, "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(2L))); + } + + @Test + @EnabledOnCommand("HPEXPIREAT") + public void hpExpireAtReturnsSuccessAndSetsTTL() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); + + actual.add(connection.hpExpireAt("hash-hexpire", inFiveSeconds, "key-2")); + actual.add(connection.hTtl("hash-hexpire", "key-2")); + + List results = getResults(); + assertThat(results.get(0)).isEqualTo(Boolean.TRUE); + assertThat((List) results.get(1)).contains(1L); + assertThat((List) results.get(2)).allSatisfy( value -> assertThat((Long)value).isBetween(0L, 5L)); + } + + @Test + @EnabledOnCommand("HPEXPIREAT") + public void hpExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); + + actual.add(connection.hpExpireAt("hash-hexpire", inFiveSeconds, "missing-field")); + actual.add(connection.hpExpireAt("missing-key", inFiveSeconds, "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(-2L), List.of(-2L))); + } + + @Test + @EnabledOnCommand("HPEXPIREAT") + 
public void hpExpireAtReturnsTwoWhenZeroProvided() { + long fiveSecondsAgo = Instant.now().minusSeconds(5L).getEpochSecond(); + + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hpExpireAt("hash-hexpire", fiveSecondsAgo, "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(2L))); + } + + @Test + @EnabledOnCommand("HPERSIST") + public void hPersistReturnsSuccessAndPersistsField() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hExpire("hash-hexpire", 5L, "key-2")); + actual.add(connection.hPersist("hash-hexpire", "key-2")); + actual.add(connection.hTtl("hash-hexpire", "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(1L), List.of(1L), List.of(-1L))); + } + + @Test + @EnabledOnCommand("HPERSIST") + public void hPersistReturnsMinusOneWhenFieldDoesNotHaveExpiration() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hPersist("hash-hexpire", "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(-1L))); + } + + @Test + @EnabledOnCommand("HPERSIST") + public void hPersistReturnsMinusTwoWhenFieldOrKeyMissing() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hPersist("hash-hexpire", "missing-field")); + actual.add(connection.hPersist("missing-key", "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(-2L), List.of(-2L))); + } + + @Test + @EnabledOnCommand("HTTL") + public void hTtlReturnsMinusOneWhenFieldHasNoExpiration() { + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hTtl("hash-hexpire", "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(-1L))); + } + + @Test + @EnabledOnCommand("HTTL") + public void hTtlReturnsMinusTwoWhenFieldOrKeyMissing() { + actual.add(connection.hTtl("hash-hexpire", "missing-field")); + actual.add(connection.hTtl("missing-key", "key-2")); + + 
verifyResults(Arrays.asList(new Object[] { List.of(-2L), List.of(-2L) })); + } + @Test // DATAREDIS-694 void touchReturnsNrOfKeysTouched() { diff --git a/src/test/java/org/springframework/data/redis/connection/jedis/JedisClusterConnectionTests.java b/src/test/java/org/springframework/data/redis/connection/jedis/JedisClusterConnectionTests.java index 8ec56d8b36..94246db629 100644 --- a/src/test/java/org/springframework/data/redis/connection/jedis/JedisClusterConnectionTests.java +++ b/src/test/java/org/springframework/data/redis/connection/jedis/JedisClusterConnectionTests.java @@ -36,6 +36,7 @@ import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import java.time.Duration; +import java.time.Instant; import java.util.*; import java.util.concurrent.TimeUnit; @@ -84,6 +85,7 @@ * @author Mark Paluch * @author Pavel Khokhlov * @author Dennis Neufeld + * @author Tihomir Mateev */ @EnabledOnRedisClusterAvailable @ExtendWith(JedisExtension.class) @@ -1038,6 +1040,148 @@ public void hStrLenReturnsZeroWhenKeyDoesNotExist() { assertThat(clusterConnection.hashCommands().hStrLen(KEY_1_BYTES, KEY_1_BYTES)).isEqualTo(0L); } + @Test + public void hExpireReturnsSuccessAndSetsTTL() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_2_BYTES)) + .allSatisfy(val -> assertThat(val).isBetween(0L, 5L)); + } + + @Test + public void hExpireReturnsMinusTwoWhenFieldDoesNotExist() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + // missing field + assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, KEY_1_BYTES)).contains(-2L); + // missing key + assertThat(clusterConnection.hashCommands().hExpire(KEY_2_BYTES, 5L, KEY_2_BYTES)).contains(-2L); + } + + @Test + public void hExpireReturnsTwoWhenZeroProvided() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + 
assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); + } + + @Test + public void hpExpireReturnsSuccessAndSetsTTL() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hpExpire(KEY_1_BYTES, 5000L, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, TimeUnit.MILLISECONDS,KEY_2_BYTES)) + .allSatisfy(val -> assertThat(val).isBetween(0L, 5000L)); + } + + @Test + public void hpExpireReturnsMinusTwoWhenFieldDoesNotExist() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + // missing field + assertThat(clusterConnection.hashCommands().hpExpire(KEY_1_BYTES, 5L, KEY_1_BYTES)).contains(-2L); + // missing key + assertThat(clusterConnection.hashCommands().hpExpire(KEY_2_BYTES, 5L, KEY_2_BYTES)).contains(-2L); + } + + @Test + public void hpExpireReturnsTwoWhenZeroProvided() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hpExpire(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); + } + + @Test + public void hExpireAtReturnsSuccessAndSetsTTL() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); + assertThat(clusterConnection.hashCommands().hExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_2_BYTES)).allSatisfy(val -> assertThat(val).isBetween(0L, 5L)); + } + + @Test + public void hExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); + + // missing field + assertThat(clusterConnection.hashCommands().hExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_1_BYTES)).contains(-2L); + // missing key + assertThat(clusterConnection.hashCommands().hExpireAt(KEY_2_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(-2L); + } + + @Test + public void 
hExpireAtReturnsTwoWhenZeroProvided() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hExpireAt(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); + } + + @Test + public void hpExpireAtReturnsSuccessAndSetsTTL() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); + assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, TimeUnit.MILLISECONDS, KEY_2_BYTES)) + .allSatisfy(val -> assertThat(val).isBetween(0L, 5000L)); + } + + @Test + public void hpExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); + + // missing field + assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_1_BYTES)).contains(-2L); + // missing key + assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_2_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(-2L); + } + + @Test + public void hpExpireAtReturnsTwoWhenZeroProvided() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); + } + + @Test + public void hPersistReturnsSuccessAndPersistsField() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); + } + + @Test + public void hPersistReturnsMinusOneWhenFieldDoesNotHaveExpiration() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); + } + + @Test + public void 
hPersistReturnsMinusTwoWhenFieldOrKeyMissing() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_1_BYTES)).contains(-2L); + assertThat(clusterConnection.hashCommands().hPersist(KEY_3_BYTES,KEY_2_BYTES)).contains(-2L); + + } + + @Test + public void hTtlReturnsMinusOneWhenFieldHasNoExpiration() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); + + } + + @Test + public void hTtlReturnsMinusTwoWhenFieldOrKeyMissing() { + + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_1_BYTES)).contains(-2L); + assertThat(clusterConnection.hashCommands().hTtl(KEY_3_BYTES,KEY_2_BYTES)).contains(-2L); + + } + @Test // DATAREDIS-315 public void hValsShouldRetrieveValuesCorrectly() { diff --git a/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceClusterConnectionTests.java b/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceClusterConnectionTests.java index 5ecba15478..94b3e4235a 100644 --- a/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceClusterConnectionTests.java +++ b/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceClusterConnectionTests.java @@ -32,6 +32,7 @@ import java.nio.charset.StandardCharsets; import java.time.Duration; +import java.time.Instant; import java.util.*; import java.util.concurrent.TimeUnit; @@ -73,6 +74,7 @@ * @author Christoph Strobl * @author Mark Paluch * @author Dennis Neufeld + * @author Tihomir Mateev */ @SuppressWarnings("deprecation") @EnabledOnRedisClusterAvailable @@ -1095,6 +1097,147 @@ public void hStrLenReturnsZeroWhenKeyDoesNotExist() { assertThat(clusterConnection.hashCommands().hStrLen(KEY_1_BYTES, KEY_1_BYTES)).isEqualTo(0L); } + @Test + public void hExpireReturnsSuccessAndSetsTTL() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + 
assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hTtl(KEY_1_BYTES, KEY_2_BYTES)).allSatisfy(val -> assertThat(val).isBetween(0L, 5L)); + } + + @Test + public void hExpireReturnsMinusTwoWhenFieldDoesNotExist() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + // missing field + assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, KEY_1_BYTES)).contains(-2L); + // missing key + assertThat(clusterConnection.hashCommands().hExpire(KEY_2_BYTES, 5L, KEY_2_BYTES)).contains(-2L); + } + + @Test + public void hExpireReturnsTwoWhenZeroProvided() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); + } + + @Test + public void hpExpireReturnsSuccessAndSetsTTL() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hpExpire(KEY_1_BYTES, 5000L, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hTtl(KEY_1_BYTES, TimeUnit.MILLISECONDS,KEY_2_BYTES)) + .allSatisfy(val -> assertThat(val).isBetween(0L, 5000L)); + } + + @Test + public void hpExpireReturnsMinusTwoWhenFieldDoesNotExist() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + // missing field + assertThat(clusterConnection.hashCommands().hpExpire(KEY_1_BYTES, 5L, KEY_1_BYTES)).contains(-2L); + // missing key + assertThat(clusterConnection.hashCommands().hpExpire(KEY_2_BYTES, 5L, KEY_2_BYTES)).contains(-2L); + } + + @Test + public void hpExpireReturnsTwoWhenZeroProvided() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hpExpire(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); + } + + @Test + public void hExpireAtReturnsSuccessAndSetsTTL() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); + assertThat(clusterConnection.hashCommands().hExpireAt(KEY_1_BYTES, 
inFiveSeconds, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hTtl(KEY_1_BYTES, KEY_2_BYTES)).allSatisfy(val -> assertThat(val).isBetween(0L, 5L)); + } + + @Test + public void hExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); + + // missing field + assertThat(clusterConnection.hashCommands().hExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_1_BYTES)).contains(-2L); + // missing key + assertThat(clusterConnection.hashCommands().hExpireAt(KEY_2_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(-2L); + } + + @Test + public void hExpireAtReturnsTwoWhenZeroProvided() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hExpireAt(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); + } + + @Test + public void hpExpireAtReturnsSuccessAndSetsTTL() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); + assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hTtl(KEY_1_BYTES, TimeUnit.MILLISECONDS, KEY_2_BYTES)) + .allSatisfy(val -> assertThat(val).isBetween(0L, 5000L)); + } + + @Test + public void hpExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); + + // missing field + assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_1_BYTES)).contains(-2L); + // missing key + assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_2_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(-2L); + } + + @Test + public void hpExpireAtReturnsTwoWhenZeroProvided() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); + } + + @Test + public void 
hPersistReturnsSuccessAndPersistsField() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hTtl(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); + } + + @Test + public void hPersistReturnsMinusOneWhenFieldDoesNotHaveExpiration() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); + } + + @Test + public void hPersistReturnsMinusTwoWhenFieldOrKeyMissing() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_1_BYTES)).contains(-2L); + assertThat(clusterConnection.hashCommands().hPersist(KEY_3_BYTES,KEY_2_BYTES)).contains(-2L); + + } + + @Test + public void hTtlReturnsMinusOneWhenFieldHasNoExpiration() { + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); + + } + + @Test + public void hTtlReturnsMinusTwoWhenFieldOrKeyMissing() { + + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_1_BYTES)).contains(-2L); + assertThat(clusterConnection.hashCommands().hTtl(KEY_3_BYTES,KEY_2_BYTES)).contains(-2L); + + } + @Test // DATAREDIS-315 public void hValsShouldRetrieveValuesCorrectly() { diff --git a/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommandsIntegrationTests.java b/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommandsIntegrationTests.java index e152cf6311..08d6eff0b5 100644 --- a/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommandsIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommandsIntegrationTests.java @@ 
-21,6 +21,8 @@ import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.time.Instant; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -35,6 +37,7 @@ * * @author Christoph Strobl * @author Mark Paluch + * @author Tihomir Mateev */ public class LettuceReactiveHashCommandsIntegrationTests extends LettuceReactiveCommandsTestSupport { @@ -288,4 +291,61 @@ void hStrLenReturnsZeroWhenKeyDoesNotExist() { connection.hashCommands().hStrLen(KEY_1_BBUFFER, FIELD_1_BBUFFER).as(StepVerifier::create).expectNext(0L) // .verifyComplete(); } + + @ParameterizedRedisTest + void hExpireShouldHandleMultipleParametersCorrectly() { + assertThat(nativeCommands.hset(KEY_1, FIELD_1, VALUE_1)).isTrue(); + assertThat(nativeCommands.hset(KEY_1, FIELD_2, VALUE_2)).isTrue(); + final var fields = Arrays.asList(FIELD_1_BBUFFER, FIELD_2_BBUFFER, FIELD_3_BBUFFER); + + connection.hashCommands().hExpire(KEY_1_BBUFFER, Duration.ofSeconds(1), fields).as(StepVerifier::create) // + .expectNext(1L) + .expectNext(1L) + .expectNext(-2L) + .expectComplete() + .verify(); + + assertThat(nativeCommands.httl(KEY_1, FIELD_1)).allSatisfy(it -> assertThat(it).isBetween(0L, 1000L)); + assertThat(nativeCommands.httl(KEY_1, FIELD_2)).allSatisfy(it -> assertThat(it).isBetween(0L, 1000L)); + assertThat(nativeCommands.httl(KEY_1, FIELD_3)).allSatisfy(it -> assertThat(it).isEqualTo(-2L)); + + } + + @ParameterizedRedisTest + void hExpireAtShouldHandleMultipleParametersCorrectly() { + assertThat(nativeCommands.hset(KEY_1, FIELD_1, VALUE_1)).isTrue(); + assertThat(nativeCommands.hset(KEY_1, FIELD_2, VALUE_2)).isTrue(); + final var fields = Arrays.asList(FIELD_1_BBUFFER, FIELD_2_BBUFFER, FIELD_3_BBUFFER); + + connection.hashCommands().hExpireAt(KEY_1_BBUFFER, Instant.now().plusSeconds(1), fields).as(StepVerifier::create) // + .expectNext(1L) + .expectNext(1L) + .expectNext(-2L) + .expectComplete() + .verify(); + + 
assertThat(nativeCommands.httl(KEY_1, FIELD_1, FIELD_2)).allSatisfy(it -> assertThat(it).isBetween(0L, 1000L)); + assertThat(nativeCommands.httl(KEY_1, FIELD_3)).allSatisfy(it -> assertThat(it).isEqualTo(-2L)); + + } + + @ParameterizedRedisTest + void hPersistShouldPersistFields() { + assertThat(nativeCommands.hset(KEY_1, FIELD_1, VALUE_1)).isTrue(); + assertThat(nativeCommands.hset(KEY_1, FIELD_2, VALUE_2)).isTrue(); + + assertThat(nativeCommands.hexpire(KEY_1, 1000, FIELD_1)) + .allSatisfy(it -> assertThat(it).isEqualTo(1L)); + + final var fields = Arrays.asList(FIELD_1_BBUFFER, FIELD_2_BBUFFER, FIELD_3_BBUFFER); + + connection.hashCommands().hPersist(KEY_1_BBUFFER, fields).as(StepVerifier::create) // + .expectNext(1L) + .expectNext(-1L) + .expectNext(-2L) + .expectComplete() + .verify(); + + assertThat(nativeCommands.httl(KEY_1, FIELD_1, FIELD_2)).allSatisfy(it -> assertThat(it).isEqualTo(-1L)); + } } diff --git a/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java b/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java index fe79d8d014..7a53caf2b4 100644 --- a/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java @@ -19,9 +19,13 @@ import static org.assertj.core.api.Assumptions.*; import java.io.IOException; +import java.time.Duration; +import java.time.Instant; import java.util.Arrays; import java.util.Collection; +import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; import org.junit.jupiter.api.BeforeEach; @@ -39,6 +43,7 @@ * * @author Jennifer Hickey * @author Christoph Strobl + * @author Tihomir Mateev * @param Key type * @param Hash key type * @param Hash value type @@ -202,4 +207,79 @@ void randomValue() { Map values = hashOps.randomEntries(key, 10); assertThat(values).hasSize(2).containsEntry(key1, 
val1).containsEntry(key2, val2); } + + @ParameterizedRedisTest + void testExpireAndGetExpireMillis() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + HK key2 = hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + hashOps.put(key, key1, val1); + hashOps.put(key, key2, val2); + + assertThat(redisTemplate.opsForHash().expire(key, Duration.ofMillis(500), List.of(key1))) + .containsExactly(1L); + + assertThat(redisTemplate.opsForHash().getExpire(key, List.of(key1))) + .allSatisfy(it -> assertThat(it).isBetween(0L, 500L)); + } + + @ParameterizedRedisTest + void testExpireAndGetExpireSeconds() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + HK key2 = hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + hashOps.put(key, key1, val1); + hashOps.put(key, key2, val2); + + assertThat(redisTemplate.opsForHash().expire(key, Duration.ofSeconds(5), List.of(key1, key2))) + .containsExactly(1L, 1L); + + assertThat(redisTemplate.opsForHash().getExpire(key, TimeUnit.SECONDS, List.of(key1, key2))) + .allSatisfy(it -> assertThat(it).isBetween(0L, 5L)); + } + + @ParameterizedRedisTest + void testExpireAtAndGetExpireMillis() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + HK key2 = hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + hashOps.put(key, key1, val1); + hashOps.put(key, key2, val2); + + assertThat(redisTemplate.opsForHash().expireAt(key, Instant.now().plusMillis(500), List.of(key1, key2))) + .containsExactly(1L, 1L); + + assertThat(redisTemplate.opsForHash().getExpire(key, List.of(key1, key2))) + .allSatisfy(it -> assertThat(it).isBetween(0L, 500L)); + } + + @ParameterizedRedisTest + void testPersistAndGetExpireMillis() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = 
hashValueFactory.instance(); + HK key2 = hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + hashOps.put(key, key1, val1); + hashOps.put(key, key2, val2); + + assertThat(redisTemplate.opsForHash().expireAt(key, Instant.now().plusMillis(500), List.of(key1, key2))) + .containsExactly(1L, 1L); + + assertThat(redisTemplate.opsForHash().persist(key, List.of(key1, key2))) + .allSatisfy(it -> assertThat(it).isEqualTo(1L)); + + assertThat(redisTemplate.opsForHash().getExpire(key, List.of(key1, key2))) + .allSatisfy(it -> assertThat(it).isEqualTo(-1L)); + } }