diff --git a/.github/workflows/quality_check_pydanticv2.yml b/.github/workflows/quality_check_pydanticv2.yml
index 8855a90b3f..d0af293498 100644
--- a/.github/workflows/quality_check_pydanticv2.yml
+++ b/.github/workflows/quality_check_pydanticv2.yml
@@ -58,7 +58,9 @@ jobs:
python-version: ${{ matrix.python-version }}
cache: "poetry"
- name: Replacing Pydantic v1 with v2 > 2.0.3
- run: poetry add "pydantic=^2.0.3"
+ run: |
+ rm -rf poetry.lock
+ poetry add "pydantic=^2.0.3"
- name: Install dependencies
run: make dev
- name: Test with pytest
diff --git a/Makefile b/Makefile
index 0de65f2d48..80c89f7296 100644
--- a/Makefile
+++ b/Makefile
@@ -8,13 +8,13 @@ dev:
pip install --upgrade pip pre-commit poetry
poetry config --local virtualenvs.in-project true
@$(MAKE) dev-version-plugin
- poetry install --extras "all datamasking-aws-sdk"
+ poetry install --extras "all datamasking-aws-sdk redis"
pre-commit install
dev-gitpod:
pip install --upgrade pip poetry
@$(MAKE) dev-version-plugin
- poetry install --extras "all datamasking-aws-sdk"
+ poetry install --extras "all datamasking-aws-sdk redis"
pre-commit install
format:
diff --git a/aws_lambda_powertools/shared/functions.py b/aws_lambda_powertools/shared/functions.py
index fb36b98dc3..c427f0d720 100644
--- a/aws_lambda_powertools/shared/functions.py
+++ b/aws_lambda_powertools/shared/functions.py
@@ -6,6 +6,7 @@
import os
import warnings
from binascii import Error as BinAsciiError
+from pathlib import Path
from typing import Any, Dict, Generator, Optional, Union, overload
from aws_lambda_powertools.shared import constants
@@ -250,3 +251,32 @@ def dataclass_to_dict(data) -> dict:
import dataclasses
return dataclasses.asdict(data)
+
+
+def abs_lambda_path(relative_path: str = "") -> str:
+    """Return the absolute path for a given path relative to the Lambda handler's root directory.
+
+ Parameters
+ ----------
+ relative_path : str, optional
+ The relative path to the lambda handler, by default an empty string.
+
+ Returns
+ -------
+ str
+ The absolute path generated from the given relative path.
+ If the environment variable LAMBDA_TASK_ROOT is set, it will use that value.
+ Otherwise, it will use the current working directory.
+ If the path is empty, it will return the current working directory.
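+
+    Example
+    -------
+    Illustrative only; assumes the Lambda runtime has set LAMBDA_TASK_ROOT to "/var/task":
+
+        >>> abs_lambda_path("certs/redis_ca.pem")
+        '/var/task/certs/redis_ca.pem'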
+ """
+ # Retrieve the LAMBDA_TASK_ROOT environment variable or default to an empty string
+ current_working_directory = os.environ.get("LAMBDA_TASK_ROOT", "")
+
+ # If LAMBDA_TASK_ROOT is not set, use the current working directory
+ if not current_working_directory:
+ current_working_directory = str(Path.cwd())
+
+ # Combine the current working directory and the relative path to get the absolute path
+ absolute_path = str(Path(current_working_directory, relative_path))
+
+ return absolute_path
diff --git a/aws_lambda_powertools/utilities/idempotency/__init__.py b/aws_lambda_powertools/utilities/idempotency/__init__.py
index 148b291ea6..ae27330cc1 100644
--- a/aws_lambda_powertools/utilities/idempotency/__init__.py
+++ b/aws_lambda_powertools/utilities/idempotency/__init__.py
@@ -11,4 +11,10 @@
from .idempotency import IdempotencyConfig, idempotent, idempotent_function
-__all__ = ("DynamoDBPersistenceLayer", "BasePersistenceLayer", "idempotent", "idempotent_function", "IdempotencyConfig")
+__all__ = (
+ "DynamoDBPersistenceLayer",
+ "BasePersistenceLayer",
+ "idempotent",
+ "idempotent_function",
+ "IdempotencyConfig",
+)
diff --git a/aws_lambda_powertools/utilities/idempotency/exceptions.py b/aws_lambda_powertools/utilities/idempotency/exceptions.py
index 6e5930549c..e4c57a8f2b 100644
--- a/aws_lambda_powertools/utilities/idempotency/exceptions.py
+++ b/aws_lambda_powertools/utilities/idempotency/exceptions.py
@@ -83,3 +83,21 @@ class IdempotencyNoSerializationModelError(BaseError):
"""
No model was supplied to the serializer
"""
+
+
+class IdempotencyPersistenceConfigError(BaseError):
+    """
+    The idempotency persistence layer configuration is unsupported
+    """
+
+
+class IdempotencyPersistenceConnectionError(BaseError):
+ """
+ Idempotency persistence connection error
+ """
+
+
+class IdempotencyPersistenceConsistencyError(BaseError):
+    """
+    The idempotency record is in an inconsistent state (e.g. orphan or corrupt) and must be overwritten
+    """
diff --git a/aws_lambda_powertools/utilities/idempotency/persistence/base.py b/aws_lambda_powertools/utilities/idempotency/persistence/base.py
index eaea36127c..f3b12da031 100644
--- a/aws_lambda_powertools/utilities/idempotency/persistence/base.py
+++ b/aws_lambda_powertools/utilities/idempotency/persistence/base.py
@@ -374,7 +374,6 @@ def save_inprogress(self, data: Dict[str, Any], remaining_time_in_millis: Option
now = datetime.datetime.now()
period = datetime.timedelta(milliseconds=remaining_time_in_millis)
timestamp = (now + period).timestamp()
-
data_record.in_progress_expiry_timestamp = int(timestamp * 1000)
else:
warnings.warn(
diff --git a/aws_lambda_powertools/utilities/idempotency/persistence/dynamodb.py b/aws_lambda_powertools/utilities/idempotency/persistence/dynamodb.py
index 295f869dc3..913e88524e 100644
--- a/aws_lambda_powertools/utilities/idempotency/persistence/dynamodb.py
+++ b/aws_lambda_powertools/utilities/idempotency/persistence/dynamodb.py
@@ -66,6 +66,8 @@ def __init__(
DynamoDB attribute name for status, by default "status"
data_attr: str, optional
DynamoDB attribute name for response data, by default "data"
+ validation_key_attr: str, optional
+ DynamoDB attribute name for hashed representation of the parts of the event used for validation
boto_config: botocore.config.Config, optional
Botocore configuration to pass during client initialization
boto3_session : boto3.Session, optional
diff --git a/aws_lambda_powertools/utilities/idempotency/persistence/redis.py b/aws_lambda_powertools/utilities/idempotency/persistence/redis.py
new file mode 100644
index 0000000000..6dda3b7fbc
--- /dev/null
+++ b/aws_lambda_powertools/utilities/idempotency/persistence/redis.py
@@ -0,0 +1,511 @@
+from __future__ import annotations
+
+import datetime
+import json
+import logging
+from contextlib import contextmanager
+from datetime import timedelta
+from typing import Any, Dict
+
+import redis
+
+from aws_lambda_powertools.shared.types import Literal, Protocol
+from aws_lambda_powertools.utilities.idempotency import BasePersistenceLayer
+from aws_lambda_powertools.utilities.idempotency.exceptions import (
+ IdempotencyItemAlreadyExistsError,
+ IdempotencyItemNotFoundError,
+ IdempotencyPersistenceConfigError,
+ IdempotencyPersistenceConnectionError,
+ IdempotencyPersistenceConsistencyError,
+)
+from aws_lambda_powertools.utilities.idempotency.persistence.base import (
+ STATUS_CONSTANTS,
+ DataRecord,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class RedisClientProtocol(Protocol):
+ """
+ Protocol class defining the interface for a Redis client.
+
+ This protocol outlines the expected behavior of a Redis client, allowing for
+ standardization among different implementations and allowing customers to extend it
+ in their own implementation.
+
+ Methods
+ -------
+ - get(name: bytes | str | memoryview) -> bytes | str | None:
+ Retrieves the value associated with the given key.
+
+ - set(
+ name: str | bytes,
+ value: bytes | float | str,
+ ex: float | timedelta | None = ...,
+ px: float | timedelta | None = ...,
+ nx: bool = ...,
+ ) -> bool | None:
+ Sets the value for the specified key with optional parameters.
+
+ - delete(keys: bytes | str | memoryview) -> Any:
+ Deletes one or more keys.
+
+ Note
+ ----
+ - The `ex` parameter represents the expiration time in seconds.
+ - The `px` parameter represents the expiration time in milliseconds.
+ - The `nx` parameter, if True, sets the value only if the key does not exist.
+
+ Raises
+ ------
+ - NotImplementedError: If any of the methods are not implemented by the concrete class.
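+
+    Example
+    -------
+    A minimal sketch of a conforming client; any object exposing compatible
+    `get`/`set`/`delete` methods (for example `redis.Redis`) satisfies this
+    protocol and can be passed via `RedisCachePersistenceLayer(client=...)`.
+    `InMemoryClient` below is hypothetical and ignores `ex`/`px` expiry for brevity:
+
+        class InMemoryClient:
+            def __init__(self):
+                self._store: dict = {}
+
+            def get(self, name):
+                return self._store.get(name)
+
+            def set(self, name, value, ex=None, px=None, nx=False):
+                if nx and name in self._store:
+                    return None
+                self._store[name] = value
+                return True
+
+            def delete(self, keys):
+                return self._store.pop(keys, None)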
+ """
+
+ def get(self, name: bytes | str | memoryview) -> bytes | str | None:
+ raise NotImplementedError
+
+ def set( # noqa
+ self,
+ name: str | bytes,
+ value: bytes | float | str,
+ ex: float | timedelta | None = ...,
+ px: float | timedelta | None = ...,
+ nx: bool = ...,
+ ) -> bool | None:
+ raise NotImplementedError
+
+ def delete(self, keys: bytes | str | memoryview) -> Any:
+ raise NotImplementedError
+
+
+class RedisConnection:
+ def __init__(
+ self,
+ url: str = "",
+ host: str = "",
+ port: int = 6379,
+ username: str = "",
+ password: str = "", # nosec - password for Redis connection
+ db_index: int = 0,
+ mode: Literal["standalone", "cluster"] = "standalone",
+ ssl: bool = True,
+ ) -> None:
+ """
+        Initialize the Redis connection used by the Redis persistence store for idempotency
+
+ Parameters
+ ----------
+ host: str, optional
+ Redis host
+ port: int, optional: default 6379
+ Redis port
+ username: str, optional
+ Redis username
+ password: str, optional
+ Redis password
+        url: str, optional
+            Redis connection string; if provided, it overrides the host/port parameters above
+        db_index: int, optional: default 0
+            Redis db index
+        mode: str, Literal["standalone", "cluster"]
+            Redis client mode, either "standalone" (default) or "cluster"
+        ssl: bool, optional: default True
+            Whether to use SSL for the Redis connection
+
+ Examples
+ --------
+
+ ```python
+ from dataclasses import dataclass, field
+ from uuid import uuid4
+
+ from aws_lambda_powertools.utilities.idempotency import (
+ idempotent,
+ )
+ from aws_lambda_powertools.utilities.idempotency.persistence.redis import (
+ RedisCachePersistenceLayer,
+ )
+
+ from aws_lambda_powertools.utilities.typing import LambdaContext
+
+ persistence_layer = RedisCachePersistenceLayer(host="localhost", port=6379)
+
+
+ @dataclass
+ class Payment:
+ user_id: str
+ product_id: str
+ payment_id: str = field(default_factory=lambda: f"{uuid4()}")
+
+
+ class PaymentError(Exception):
+ ...
+
+
+ @idempotent(persistence_store=persistence_layer)
+ def lambda_handler(event: dict, context: LambdaContext):
+ try:
+ payment: Payment = create_subscription_payment(event)
+ return {
+ "payment_id": payment.payment_id,
+ "message": "success",
+ "statusCode": 200,
+ }
+ except Exception as exc:
+ raise PaymentError(f"Error creating payment {str(exc)}")
+
+
+ def create_subscription_payment(event: dict) -> Payment:
+ return Payment(**event)
+
+ ```
+ """
+ self.url = url
+ self.host = host
+ self.port = port
+ self.username = username
+ self.password = password
+ self.db_index = db_index
+ self.ssl = ssl
+ self.mode = mode
+
+ def _init_client(self) -> RedisClientProtocol:
+ logger.debug(f"Trying to connect to Redis: {self.host}")
+ client: type[redis.Redis | redis.cluster.RedisCluster]
+ if self.mode == "standalone":
+ client = redis.Redis
+ elif self.mode == "cluster":
+ client = redis.cluster.RedisCluster
+ else:
+ raise IdempotencyPersistenceConfigError(f"Mode {self.mode} not supported")
+
+ try:
+ if self.url:
+ logger.debug(f"Using URL format to connect to Redis: {self.host}")
+ return client.from_url(url=self.url)
+ else:
+ # Redis in cluster mode doesn't support db parameter
+ extra_param_connection: Dict[str, Any] = {}
+ if self.mode != "cluster":
+ extra_param_connection = {"db": self.db_index}
+
+ logger.debug(f"Using arguments to connect to Redis: {self.host}")
+ return client(
+ host=self.host,
+ port=self.port,
+ username=self.username,
+ password=self.password,
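+                    # decode_responses=True returns str instead of bytes, so values can be passed straight to json.loads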
+ decode_responses=True,
+ ssl=self.ssl,
+ **extra_param_connection,
+ )
+        except redis.exceptions.ConnectionError as exc:
+            logger.debug(f"Cannot connect to Redis: {self.host}")
+            raise IdempotencyPersistenceConnectionError("Could not connect to Redis", exc) from exc
+
+
+class RedisCachePersistenceLayer(BasePersistenceLayer):
+ def __init__(
+ self,
+ url: str = "",
+ host: str = "",
+ port: int = 6379,
+ username: str = "",
+ password: str = "", # nosec - password for Redis connection
+ db_index: int = 0,
+ mode: Literal["standalone", "cluster"] = "standalone",
+ ssl: bool = True,
+ client: RedisClientProtocol | None = None,
+ in_progress_expiry_attr: str = "in_progress_expiration",
+ expiry_attr: str = "expiration",
+ status_attr: str = "status",
+ data_attr: str = "data",
+ validation_key_attr: str = "validation",
+ ):
+ """
+ Initialize the Redis Persistence Layer
+
+ Parameters
+ ----------
+ host: str, optional
+ Redis host
+ port: int, optional: default 6379
+ Redis port
+ username: str, optional
+ Redis username
+ password: str, optional
+ Redis password
+        url: str, optional
+            Redis connection string; if provided, it overrides the host/port parameters above
+        db_index: int, optional: default 0
+            Redis db index
+        mode: str, Literal["standalone", "cluster"]
+            Redis client mode, either "standalone" (default) or "cluster"
+        ssl: bool, optional: default True
+            Whether to use SSL for the Redis connection
+ client: RedisClientProtocol, optional
+ Bring your own Redis client that follows RedisClientProtocol.
+ If provided, all other connection configuration options will be ignored
+ expiry_attr: str, optional
+ Redis json attribute name for expiry timestamp, by default "expiration"
+ in_progress_expiry_attr: str, optional
+ Redis json attribute name for in-progress expiry timestamp, by default "in_progress_expiration"
+ status_attr: str, optional
+ Redis json attribute name for status, by default "status"
+ data_attr: str, optional
+ Redis json attribute name for response data, by default "data"
+ validation_key_attr: str, optional
+ Redis json attribute name for hashed representation of the parts of the event used for validation
+
+ Examples
+ --------
+
+ ```python
+ from redis import Redis
+ from aws_lambda_powertools.utilities.idempotency import (
+ idempotent,
+ )
+
+        from aws_lambda_powertools.utilities.idempotency.persistence.redis import (
+            RedisCachePersistenceLayer,
+        )
+        from aws_lambda_powertools.utilities.typing import LambdaContext
+
+        client = Redis(
+            host="localhost",
+            port=6379,
+            decode_responses=True,
+        )
+ persistence_layer = RedisCachePersistenceLayer(client=client)
+
+ @idempotent(persistence_store=persistence_layer)
+ def lambda_handler(event: dict, context: LambdaContext):
+ print("expensive operation")
+ return {
+ "payment_id": 12345,
+ "message": "success",
+ "statusCode": 200,
+ }
+ ```
+ """
+
+ # Initialize Redis client with Redis config if no client is passed in
+ if client is None:
+ self.client = RedisConnection(
+ host=host,
+ port=port,
+ username=username,
+ password=password,
+ db_index=db_index,
+ url=url,
+ mode=mode,
+ ssl=ssl,
+ )._init_client()
+ else:
+ self.client = client
+
+ self.in_progress_expiry_attr = in_progress_expiry_attr
+ self.expiry_attr = expiry_attr
+ self.status_attr = status_attr
+ self.data_attr = data_attr
+ self.validation_key_attr = validation_key_attr
+ self._json_serializer = json.dumps
+ self._json_deserializer = json.loads
+        super().__init__()
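+        # Cap the orphan-record lock at 10 seconds, but never longer than the record TTL itself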
+ self._orphan_lock_timeout = min(10, self.expires_after_seconds)
+
+ def _get_expiry_second(self, expiry_timestamp: int | None = None) -> int:
+ """
+ Calculates the number of seconds remaining until a specified expiry time
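+
+        For example (illustrative): a record expiring 60 seconds from now yields a TTL of
+        roughly 60 seconds, while a missing timestamp falls back to the configured
+        ``expires_after_seconds``.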
+ """
+ if expiry_timestamp:
+ return expiry_timestamp - int(datetime.datetime.now().timestamp())
+ return self.expires_after_seconds
+
+ def _item_to_data_record(self, idempotency_key: str, item: Dict[str, Any]) -> DataRecord:
+ in_progress_expiry_timestamp = item.get(self.in_progress_expiry_attr)
+
+ return DataRecord(
+ idempotency_key=idempotency_key,
+ status=item[self.status_attr],
+ in_progress_expiry_timestamp=in_progress_expiry_timestamp,
+ response_data=str(item.get(self.data_attr)),
+ payload_hash=str(item.get(self.validation_key_attr)),
+            expiry_timestamp=item.get(self.expiry_attr),
+ )
+
+ def _get_record(self, idempotency_key) -> DataRecord:
+ # See: https://redis.io/commands/get/
+ response = self.client.get(idempotency_key)
+
+ # key not found
+ if not response:
+ raise IdempotencyItemNotFoundError
+
+ try:
+ item = self._json_deserializer(response)
+        except json.JSONDecodeError:
+            # A JSON decode error is treated as a consistency error. Like an orphan record, it can
+            # introduce a race condition: two Lambda handlers may reach this line almost simultaneously,
+            # and if we simply treated the record as invalid, both would start the overwrite process
+            # without knowing about each other, and both handlers would ultimately execute, which is
+            # against idempotency. Raising IdempotencyPersistenceConsistencyError routes this case
+            # through the orphan-record handling (which takes a lock) and avoids the race condition.
+            raise IdempotencyPersistenceConsistencyError
+
+ return self._item_to_data_record(idempotency_key, item)
+
+ def _put_in_progress_record(self, data_record: DataRecord) -> None:
+ item: Dict[str, Any] = {
+ "name": data_record.idempotency_key,
+ "mapping": {
+ self.status_attr: data_record.status,
+ self.expiry_attr: data_record.expiry_timestamp,
+ },
+ }
+
+ if data_record.in_progress_expiry_timestamp is not None:
+ item["mapping"][self.in_progress_expiry_attr] = data_record.in_progress_expiry_timestamp
+
+ if self.payload_validation_enabled:
+ item["mapping"][self.validation_key_attr] = data_record.payload_hash
+
+ now = datetime.datetime.now()
+ try:
+ # | LOCKED | RETRY if status = "INPROGRESS" | RETRY
+ # |----------------|-------------------------------------------------------|-------------> .... (time)
+ # | Lambda Idempotency Record
+ # | Timeout Timeout
+ # | (in_progress_expiry) (expiry)
+
+ # Conditions to successfully save a record:
+
+ # The idempotency key does not exist:
+ # - first time that this invocation key is used
+ # - previous invocation with the same key was deleted due to TTL
+ # - SET see https://redis.io/commands/set/
+
+ logger.debug(f"Putting record on Redis for idempotency key: {data_record.idempotency_key}")
+ encoded_item = self._json_serializer(item["mapping"])
+ ttl = self._get_expiry_second(expiry_timestamp=data_record.expiry_timestamp)
+
+ redis_response = self.client.set(name=data_record.idempotency_key, value=encoded_item, ex=ttl, nx=True)
+
+ # If redis_response is True, the Redis SET operation was successful and the idempotency key was not
+ # previously set. This indicates that we can safely proceed to the handler execution phase.
+ # Most invocations should successfully proceed past this point.
+ if redis_response:
+ return
+
+ # If redis_response is None, it indicates an existing record in Redis for the given idempotency key.
+ # This could be due to:
+ # - An active idempotency record from a previous invocation that has not yet expired.
+ # - An orphan record where a previous invocation has timed out.
+ # - An expired idempotency record that has not been deleted by Redis.
+ # In any case, we proceed to retrieve the record for further inspection.
+
+ idempotency_record = self._get_record(data_record.idempotency_key)
+
+ # If the status of the idempotency record is 'COMPLETED' and the record has not expired
+ # (i.e., the expiry timestamp is greater than the current timestamp), then a valid completed
+ # record exists. We raise an error to prevent duplicate processing of a request that has already
+ # been completed successfully.
+ if idempotency_record.status == STATUS_CONSTANTS["COMPLETED"] and not idempotency_record.is_expired:
+ raise IdempotencyItemAlreadyExistsError
+
+ # If the idempotency record has a status of 'INPROGRESS' and has a valid in_progress_expiry_timestamp
+ # (meaning the timestamp is greater than the current timestamp in milliseconds), then we have encountered
+ # a valid in-progress record. This indicates that another process is currently handling the request, and
+ # to maintain idempotency, we raise an error to prevent concurrent processing of the same request.
+ if (
+ idempotency_record.status == STATUS_CONSTANTS["INPROGRESS"]
+ and idempotency_record.in_progress_expiry_timestamp
+ and idempotency_record.in_progress_expiry_timestamp > int(now.timestamp() * 1000)
+ ):
+ raise IdempotencyItemAlreadyExistsError
+
+ # Reaching this point indicates that the idempotency record found is an orphan record. An orphan record is
+ # one that is neither completed nor in-progress within its expected time frame. It may result from a
+ # previous invocation that has timed out or an expired record that has yet to be cleaned up by Redis.
+ # We raise an error to handle this exceptional scenario appropriately.
+ raise IdempotencyPersistenceConsistencyError
+
+ except IdempotencyPersistenceConsistencyError:
+ # Handle an orphan record by attempting to acquire a lock, which by default lasts for 10 seconds.
+ # The purpose of acquiring the lock is to prevent race conditions with other processes that might
+ # also be trying to handle the same orphan record. Once the lock is acquired, we set a new value
+ # for the idempotency record in Redis with the appropriate time-to-live (TTL).
+ with self._acquire_lock(name=item["name"]):
+ self.client.set(name=item["name"], value=encoded_item, ex=ttl)
+
+ # Not removing the lock here serves as a safeguard against race conditions,
+ # preventing another operation from mistakenly treating this record as an orphan while the
+ # current operation is still in progress.
+ except (redis.exceptions.RedisError, redis.exceptions.RedisClusterException) as e:
+ raise e
+ except Exception as e:
+ logger.debug(f"encountered non-Redis exception: {e}")
+ raise e
+
+ @contextmanager
+ def _acquire_lock(self, name: str):
+ """
+ Attempt to acquire a lock for a specified resource name, with a default timeout.
+ This context manager attempts to set a lock using Redis to prevent concurrent
+ access to a resource identified by 'name'. It uses the 'nx' flag to ensure that
+ the lock is only set if it does not already exist, thereby enforcing mutual exclusion.
+ """
+        logger.debug("Acquiring lock to overwrite orphan record")
+        acquired = self.client.set(name=f"{name}:lock", value="True", ex=self._orphan_lock_timeout, nx=True)
+        if acquired:
+            logger.debug("Lock acquired")
+            yield
+        else:
+            # If the lock acquisition fails, it suggests a race condition has occurred. In this case, instead
+            # of proceeding, we log the event and raise an error to indicate that the current operation should
+            # be retried after the lock is released by the process that currently holds it.
+            logger.debug("Lock acquisition failed, raising to retry")
+            raise IdempotencyItemAlreadyExistsError
+
+ def _put_record(self, data_record: DataRecord) -> None:
+ if data_record.status == STATUS_CONSTANTS["INPROGRESS"]:
+ self._put_in_progress_record(data_record=data_record)
+ else:
+            # Currently this method only supports saving the in-progress record; completed records are saved via _update_record
+ raise NotImplementedError
+
+ def _update_record(self, data_record: DataRecord) -> None:
+ item: Dict[str, Any] = {
+ "name": data_record.idempotency_key,
+ "mapping": {
+ self.data_attr: data_record.response_data,
+ self.status_attr: data_record.status,
+ self.expiry_attr: data_record.expiry_timestamp,
+ },
+ }
+ logger.debug(f"Updating record for idempotency key: {data_record.idempotency_key}")
+ encoded_item = self._json_serializer(item["mapping"])
+ ttl = self._get_expiry_second(data_record.expiry_timestamp)
+        # Set the TTL again: without ex here, the updated record would persist with no TTL
+ self.client.set(name=item["name"], value=encoded_item, ex=ttl)
+
+ def _delete_record(self, data_record: DataRecord) -> None:
+ """
+ Deletes the idempotency record associated with a given DataRecord from Redis.
+ This function is designed to be called after a Lambda handler invocation has completed processing.
+ It ensures that the idempotency key associated with the DataRecord is removed from Redis to
+ prevent future conflicts and to maintain the idempotency integrity.
+
+ Note: it is essential that the idempotency key is not empty, as that would indicate the Lambda
+ handler has not been invoked or the key was not properly set.
+ """
+ logger.debug(f"Deleting record for idempotency key: {data_record.idempotency_key}")
+
+ # See: https://redis.io/commands/del/
+ self.client.delete(data_record.idempotency_key)
diff --git a/docs/utilities/idempotency.md b/docs/utilities/idempotency.md
index 0d79cbf092..3b7fe344b1 100644
--- a/docs/utilities/idempotency.md
+++ b/docs/utilities/idempotency.md
@@ -14,6 +14,7 @@ The idempotency utility provides a simple solution to convert your Lambda functi
* Select a subset of the event as the idempotency key using JMESPath expressions
* Set a time window in which records with the same payload should be considered duplicates
* Expires in-progress executions if the Lambda function times out halfway through
+* Supports Amazon DynamoDB and Redis as persistence layers
## Terminology
@@ -51,6 +52,9 @@ classDiagram
## Getting started
+???+ note
+    This section uses DynamoDB as the default idempotency persistence store. If you are interested in using Redis instead, check out the [Redis as persistent storage layer](#redis-as-persistent-storage-layer-provider) section.
+
### IAM Permissions
Your Lambda function IAM Role must have `dynamodb:GetItem`, `dynamodb:PutItem`, `dynamodb:UpdateItem` and `dynamodb:DeleteItem` IAM permissions before using this feature.
@@ -62,7 +66,7 @@ Your Lambda function IAM Role must have `dynamodb:GetItem`, `dynamodb:PutItem`,
Before getting started, you need to create a persistent storage layer where the idempotency utility can store its state - your lambda functions will need read and write access to it.
-As of now, Amazon DynamoDB is the only supported persistent storage layer, so you'll need to create a table first.
+We currently support Amazon DynamoDB and Redis as storage layers. The following example demonstrates how to create a table in DynamoDB. If you prefer to use Redis, check out the [RedisPersistenceLayer](#redispersistencelayer) section.
**Default table configuration**
@@ -96,7 +100,7 @@ If you're not [changing the default configuration for the DynamoDB persistence l
???+ warning "Warning: Large responses with DynamoDB persistence layer"
When using this utility with DynamoDB, your function's responses must be [smaller than 400KB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html#limits-items){target="_blank"}.
- Larger items cannot be written to DynamoDB and will cause exceptions.
+    Larger items cannot be written to DynamoDB and will cause exceptions. If your response exceeds 400 KB, consider using Redis as your persistence layer.
???+ info "Info: DynamoDB"
Each function invocation will generally make 2 requests to DynamoDB. If the
@@ -333,6 +337,51 @@ If an Exception is raised _outside_ the scope of the decorated function and afte
As this happens outside the scope of your decorated function, you are not able to catch it if you're using the `idempotent` decorator on your Lambda handler.
+### Persistence layers
+
+#### DynamoDBPersistenceLayer
+
+This persistence layer is built-in, allowing you to use an existing DynamoDB table or create a new one dedicated to idempotency state (recommended).
+
+=== "Customizing DynamoDBPersistenceLayer to suit your table structure"
+
+ ```python hl_lines="7-15"
+ --8<-- "examples/idempotency/src/customize_persistence_layer.py"
+ ```
+
+When using DynamoDB as the persistence layer, you can customize the attribute names by passing the following parameters during the initialization of the persistence layer:
+
+| Parameter | Required | Default | Description |
+| --------------------------- | ------------------ | ------------------------------------ | -------------------------------------------------------------------------------------------------------- |
+| **table_name** | :heavy_check_mark: | | Table name to store state |
+| **key_attr** | | `id` | Partition key of the table. Hashed representation of the payload (unless **sort_key_attr** is specified) |
+| **expiry_attr** | | `expiration` | Unix timestamp of when record expires |
+| **in_progress_expiry_attr** | | `in_progress_expiration` | Unix timestamp of when record expires while in progress (in case of the invocation times out) |
+| **status_attr** | | `status` | Stores status of the lambda execution during and after invocation |
+| **data_attr** | | `data` | Stores results of successfully executed Lambda handlers |
+| **validation_key_attr** | | `validation` | Hashed representation of the parts of the event used for validation |
+| **sort_key_attr** | | | Sort key of the table (if table is configured with a sort key). |
+| **static_pk_value** | | `idempotency#{LAMBDA_FUNCTION_NAME}` | Static value to use as the partition key. Only used when **sort_key_attr** is set. |
+
+#### RedisPersistenceLayer
+
+This persistence layer is built-in, allowing you to use an existing Redis service. For optimal performance and compatibility, it is strongly recommended to use a Redis service version 7 or higher.
+
+=== "Customizing RedisPersistenceLayer to suit your data structure"
+
+ ```python hl_lines="9-16"
+ --8<-- "examples/idempotency/src/customize_persistence_layer_redis.py"
+ ```
+
+When using Redis as the persistence layer, you can customize the attribute names by providing the following parameters upon initialization of the persistence layer:
+
+| Parameter | Required | Default | Description |
+| --------------------------- | ------------------ | ------------------------------------ | -------------------------------------------------------------------------------------------------------- |
+| **in_progress_expiry_attr** | | `in_progress_expiration` | Unix timestamp of when record expires while in progress (in case of the invocation times out) |
+| **status_attr** | | `status` | Stores status of the Lambda execution during and after invocation |
+| **data_attr** | | `data` | Stores results of successfully executed Lambda handlers |
+| **validation_key_attr** | | `validation` | Hashed representation of the parts of the event used for validation |
+
### Idempotency request flow
The following sequence diagrams explain how the Idempotency feature behaves under different scenarios.
@@ -527,33 +576,120 @@ sequenceDiagram
Optional idempotency key
-## Advanced
+#### Race condition with Redis
-### Persistence layers
+
+```mermaid
+graph TD;
+    A(Existing orphan record in Redis)-->A1;
+    A1[Two Lambda invocations at the same time]-->B1[Lambda handler1];
+    B1-->B2[Fetch from Redis];
+    B2-->B3[Handler1 gets orphan record];
+    B3-->B4[Handler1 acquires lock];
+    B4-->B5[Handler1 overwrites orphan record];
+    B5-->B6[Handler1 continues execution];
+    A1-->C1[Lambda handler2];
+    C1-->C2[Fetch from Redis];
+    C2-->C3[Handler2 gets orphan record];
+    C3-->C4[Handler2 fails to acquire lock];
+    C4-->C5[Handler2 waits and fetches from Redis];
+    C5-->C6[Handler2 returns without executing];
+    B6-->D(Lambda handler executed only once);
+    C6-->D;
+```
+Race condition with Redis
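+
+The lock step in the diagram is a single conditional Redis write. A simplified sketch of the primitive the persistence layer uses internally (see `_acquire_lock`; `client` and `idempotency_key` are illustrative names):
+
+```python
+# SET with NX + EX: only the first caller gets True; later callers get None until the
+# 10-second lock expires, so exactly one handler overwrites the orphan record.
+acquired = client.set(name=f"{idempotency_key}:lock", value="True", ex=10, nx=True)
+if not acquired:
+    # the persistence layer raises IdempotencyItemAlreadyExistsError here, so the
+    # losing invocation is treated as a duplicate instead of overwriting the record
+    raise IdempotencyItemAlreadyExistsError
+```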
+
-#### DynamoDBPersistenceLayer
+## Redis as persistent storage layer provider
-This persistence layer is built-in, and you can either use an existing DynamoDB table or create a new one dedicated for idempotency state (recommended).
+### Redis resources
-=== "Customizing DynamoDBPersistenceLayer to suit your table structure"
+Before setting up Redis as the persistent storage layer provider, you must have an existing Redis service. We recommend Redis-compatible services such as [Amazon ElastiCache for Redis](https://aws.amazon.com/elasticache/redis/){target="_blank"} or [Amazon MemoryDB for Redis](https://aws.amazon.com/memorydb/){target="_blank"} as your persistent storage layer provider.
- ```python hl_lines="7-15"
- --8<-- "examples/idempotency/src/customize_persistence_layer.py"
+???+ tip "No existing Redis service?"
+ If you don't have an existing Redis service, we recommend using [DynamoDB](#dynamodbpersistencelayer) as the persistent storage layer provider.
+
+=== "AWS CloudFormation example"
+
+ ```yaml hl_lines="5"
+ --8<-- "examples/idempotency/templates/cfn_redis_serverless.yaml"
```
-When using DynamoDB as a persistence layer, you can alter the attribute names by passing these parameters when initializing the persistence layer:
+ 1. Replace the Security Group ID and Subnet ID to match your VPC settings.
-| Parameter | Required | Default | Description |
-| --------------------------- | ------------------ | ------------------------------------ | -------------------------------------------------------------------------------------------------------- |
-| **table_name** | :heavy_check_mark: | | Table name to store state |
-| **key_attr** | | `id` | Partition key of the table. Hashed representation of the payload (unless **sort_key_attr** is specified) |
-| **expiry_attr** | | `expiration` | Unix timestamp of when record expires |
-| **in_progress_expiry_attr** | | `in_progress_expiration` | Unix timestamp of when record expires while in progress (in case of the invocation times out) |
-| **status_attr** | | `status` | Stores status of the lambda execution during and after invocation |
-| **data_attr** | | `data` | Stores results of successfully executed Lambda handlers |
-| **validation_key_attr** | | `validation` | Hashed representation of the parts of the event used for validation |
-| **sort_key_attr** | | | Sort key of the table (if table is configured with a sort key). |
-| **static_pk_value** | | `idempotency#{LAMBDA_FUNCTION_NAME}` | Static value to use as the partition key. Only used when **sort_key_attr** is set. |
+### VPC Access
+
+Your Lambda function must have network access to the Redis endpoint before using it as the idempotency persistent storage layer. In most cases, you will need to [configure VPC access](https://docs.aws.amazon.com/lambda/latest/dg/configuration-vpc.html){target="_blank"} for your Lambda function.
+
+???+ tip "Amazon ElastiCache/MemoryDB for Redis as persistent storage layer provider"
+ If you plan to use Amazon ElastiCache for Redis as the idempotency persistent storage layer, you may find [this AWS tutorial](https://docs.aws.amazon.com/lambda/latest/dg/services-elasticache-tutorial.html){target="_blank"} helpful.
+ For those using Amazon MemoryDB for Redis, refer to [this AWS tutorial](https://aws.amazon.com/blogs/database/access-amazon-memorydb-for-redis-from-aws-lambda/){target="_blank"} specifically for the VPC setup guidance.
+
+After completing the VPC setup, you can use the templates provided below to set up Lambda functions with access to VPC internal subnets.
+
+=== "AWS Serverless Application Model (SAM) example"
+
+ ```yaml hl_lines="9"
+ --8<-- "examples/idempotency/templates/sam_redis_vpc.yaml"
+ ```
+
+ 1. Replace the Security Group ID and Subnet ID to match your VPC settings.
+
+### Configuring Redis persistence layer
+
+You can quickly get started by initializing the `RedisCachePersistenceLayer` class and applying the `idempotent` decorator to your Lambda handler. For a detailed example of using the `RedisCachePersistenceLayer`, refer to the [Persistence layers section](#redispersistencelayer).
+
+???+ info
+    We enforce security best practices by using SSL connections in the `RedisCachePersistenceLayer`; to disable SSL, set `ssl=False`.
+
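+For example, connecting to a local, non-TLS Redis endpoint would look like this (illustrative):
+
+```python
+from aws_lambda_powertools.utilities.idempotency.persistence.redis import (
+    RedisCachePersistenceLayer,
+)
+
+persistence_layer = RedisCachePersistenceLayer(host="localhost", port=6379, ssl=False)
+```
+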
+=== "Use Persistence Layer with Redis config variables"
+ ```python hl_lines="7-9 12 26"
+ --8<-- "examples/idempotency/src/getting_started_with_idempotency_redis_config.py"
+ ```
+
+=== "Use established Redis Client"
+ ```python hl_lines="4 9-11 14 22 36"
+ --8<-- "examples/idempotency/src/getting_started_with_idempotency_redis_client.py"
+ ```
+
+=== "Sample event"
+
+ ```json
+ --8<-- "examples/idempotency/src/getting_started_with_idempotency_payload.json"
+ ```
+
+### Custom advanced settings
+
+For advanced configurations, such as setting up SSL certificates or customizing parameters like timeouts, you can configure the Redis client directly and pass it to the persistence layer.
+
+=== "Advanced configuration using AWS Secrets"
+ ```python hl_lines="7-9 11 13 23"
+ --8<-- "examples/idempotency/src/using_redis_client_with_aws_secrets.py"
+ ```
+
+ 1. JSON stored:
+ {
+ "REDIS_ENDPOINT": "127.0.0.1",
+ "REDIS_PORT": "6379",
+ "REDIS_PASSWORD": "redis-secret"
+ }
+
+=== "Advanced configuration with local certificates"
+ ```python hl_lines="12 23-25"
+ --8<-- "examples/idempotency/src/using_redis_client_with_local_certs.py"
+ ```
+
+ 1. JSON stored:
+ {
+ "REDIS_ENDPOINT": "127.0.0.1",
+ "REDIS_PORT": "6379",
+ "REDIS_PASSWORD": "redis-secret"
+ }
+ 2. redis_user.crt file stored in the "certs" directory of your Lambda function
+ 3. redis_user_private.key file stored in the "certs" directory of your Lambda function
+ 4. redis_ca.pem file stored in the "certs" directory of your Lambda function
+
+## Advanced
### Customizing the default behavior
@@ -783,7 +919,7 @@ The idempotency utility can be used with the `validator` decorator. Ensure that
If you use an envelope with the validator, the event received by the idempotency utility will be the unwrapped
event - not the "raw" event Lambda was invoked with.
- Make sure to account for this behaviour, if you set the `event_key_jmespath`.
+ Make sure to account for this behavior, if you set the `event_key_jmespath`.
=== "Using Idempotency with JSONSchema Validation utility"
@@ -854,6 +990,40 @@ This means it is possible to pass a mocked Table resource, or stub various metho
--8<-- "examples/idempotency/tests/app_test_io_operations.py"
```
+### Testing with Redis
+
+To test locally, you can either utilize [fakeredis-py](https://github.com/cunla/fakeredis-py) for a simulated Redis environment or refer to the [MockRedis](https://github.com/aws-powertools/powertools-lambda-python/blob/ba6532a1c73e20fdaee88c5795fd40e978553e14/tests/functional/idempotency/persistence/test_redis_layer.py#L34-L66) class used in our tests to mock Redis operations.
+
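+If you choose `fakeredis-py`, a minimal sketch (assuming the `fakeredis` package is installed; `FakeStrictRedis` is its in-memory, redis-py compatible client):
+
+```python
+from fakeredis import FakeStrictRedis
+
+from aws_lambda_powertools.utilities.idempotency.persistence.redis import (
+    RedisCachePersistenceLayer,
+)
+
+# FakeStrictRedis emulates Redis commands in memory, so no server is required
+persistence_layer = RedisCachePersistenceLayer(client=FakeStrictRedis(decode_responses=True))
+```
+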
+=== "test_with_mock_redis.py"
+
+ ```python hl_lines="2 3 29 31"
+ --8<-- "examples/idempotency/tests/test_with_mock_redis.py"
+ ```
+
+=== "mock_redis.py"
+
+ ```python
+ --8<-- "examples/idempotency/tests/mock_redis.py"
+ ```
+
+If you want to set up a real Redis client for integration testing, you can reference the code provided below.
+
+=== "test_with_real_redis.py"
+
+ ```python hl_lines="3 4 29 38"
+ --8<-- "examples/idempotency/tests/test_with_real_redis.py"
+ ```
+
+=== "Makefile"
+
+ ```bash
+ test-idempotency-redis: # (1)!
+ docker run --name test-idempotency-redis -d -p 63005:6379 redis
+        pytest test_with_real_redis.py; docker stop test-idempotency-redis; docker rm test-idempotency-redis
+ ```
+
+    1. Use this target to spin up a temporary Redis container and remove it automatically upon completion
+
## Extra resources
If you're interested in a deep dive on how Amazon uses idempotency when building our APIs, check out
diff --git a/examples/idempotency/src/customize_persistence_layer_redis.py b/examples/idempotency/src/customize_persistence_layer_redis.py
new file mode 100644
index 0000000000..7db3d1b53e
--- /dev/null
+++ b/examples/idempotency/src/customize_persistence_layer_redis.py
@@ -0,0 +1,21 @@
+from aws_lambda_powertools.utilities.idempotency import (
+ idempotent,
+)
+from aws_lambda_powertools.utilities.idempotency.persistence.redis import (
+ RedisCachePersistenceLayer,
+)
+from aws_lambda_powertools.utilities.typing import LambdaContext
+
+persistence_layer = RedisCachePersistenceLayer(
+ host="localhost",
+ port=6379,
+ in_progress_expiry_attr="in_progress_expiration",
+ status_attr="status",
+ data_attr="data",
+ validation_key_attr="validation",
+)
+
+
+@idempotent(persistence_store=persistence_layer)
+def lambda_handler(event: dict, context: LambdaContext) -> dict:
+ return event
diff --git a/examples/idempotency/src/getting_started_with_idempotency_redis_client.py b/examples/idempotency/src/getting_started_with_idempotency_redis_client.py
new file mode 100644
index 0000000000..f06d059fad
--- /dev/null
+++ b/examples/idempotency/src/getting_started_with_idempotency_redis_client.py
@@ -0,0 +1,50 @@
+from dataclasses import dataclass, field
+from uuid import uuid4
+
+from redis import Redis
+
+from aws_lambda_powertools.utilities.idempotency import (
+ idempotent,
+)
+from aws_lambda_powertools.utilities.idempotency.persistence.redis import (
+ RedisCachePersistenceLayer,
+)
+from aws_lambda_powertools.utilities.typing import LambdaContext
+
+client = Redis(
+ host="localhost",
+ port=6379,
+ socket_connect_timeout=5,
+ socket_timeout=5,
+ max_connections=1000,
+)
+
+persistence_layer = RedisCachePersistenceLayer(client=client)
+
+
+@dataclass
+class Payment:
+ user_id: str
+ product_id: str
+ payment_id: str = field(default_factory=lambda: f"{uuid4()}")
+
+
+class PaymentError(Exception):
+ ...
+
+
+@idempotent(persistence_store=persistence_layer)
+def lambda_handler(event: dict, context: LambdaContext):
+ try:
+ payment: Payment = create_subscription_payment(event)
+ return {
+ "payment_id": payment.payment_id,
+ "message": "success",
+ "statusCode": 200,
+ }
+ except Exception as exc:
+ raise PaymentError(f"Error creating payment {str(exc)}")
+
+
+def create_subscription_payment(event: dict) -> Payment:
+ return Payment(**event)
diff --git a/examples/idempotency/src/getting_started_with_idempotency_redis_config.py b/examples/idempotency/src/getting_started_with_idempotency_redis_config.py
new file mode 100644
index 0000000000..de9c652605
--- /dev/null
+++ b/examples/idempotency/src/getting_started_with_idempotency_redis_config.py
@@ -0,0 +1,40 @@
+from dataclasses import dataclass, field
+from uuid import uuid4
+
+from aws_lambda_powertools.utilities.idempotency import (
+ idempotent,
+)
+from aws_lambda_powertools.utilities.idempotency.persistence.redis import (
+ RedisCachePersistenceLayer,
+)
+from aws_lambda_powertools.utilities.typing import LambdaContext
+
+persistence_layer = RedisCachePersistenceLayer(host="localhost", port=6379)
+
+
+@dataclass
+class Payment:
+ user_id: str
+ product_id: str
+ payment_id: str = field(default_factory=lambda: f"{uuid4()}")
+
+
+class PaymentError(Exception):
+ ...
+
+
+@idempotent(persistence_store=persistence_layer)
+def lambda_handler(event: dict, context: LambdaContext):
+ try:
+ payment: Payment = create_subscription_payment(event)
+ return {
+ "payment_id": payment.payment_id,
+ "message": "success",
+ "statusCode": 200,
+ }
+ except Exception as exc:
+ raise PaymentError(f"Error creating payment {str(exc)}")
+
+
+def create_subscription_payment(event: dict) -> Payment:
+ return Payment(**event)
diff --git a/examples/idempotency/src/integrate_idempotency_with_batch_processor_payload.json b/examples/idempotency/src/integrate_idempotency_with_batch_processor_payload.json
index 73a5029d61..e016cf7946 100644
--- a/examples/idempotency/src/integrate_idempotency_with_batch_processor_payload.json
+++ b/examples/idempotency/src/integrate_idempotency_with_batch_processor_payload.json
@@ -7,7 +7,7 @@
"attributes": {
"ApproximateReceiveCount": "1",
"SentTimestamp": "1545082649183",
- "SenderId": "AIDAIENQZJOLO23YVJ4VO",
+ "SenderId": "replace-to-pass-gitleak",
"ApproximateFirstReceiveTimestamp": "1545082649185"
},
"messageAttributes": {
diff --git a/examples/idempotency/src/using_redis_client_with_aws_secrets.py b/examples/idempotency/src/using_redis_client_with_aws_secrets.py
new file mode 100644
index 0000000000..f30751c880
--- /dev/null
+++ b/examples/idempotency/src/using_redis_client_with_aws_secrets.py
@@ -0,0 +1,31 @@
+from typing import Any
+
+from redis import Redis
+
+from aws_lambda_powertools.utilities import parameters
+from aws_lambda_powertools.utilities.idempotency import IdempotencyConfig, idempotent
+from aws_lambda_powertools.utilities.idempotency.persistence.redis import (
+ RedisCachePersistenceLayer,
+)
+
+redis_values: Any = parameters.get_secret("redis_info", transform="json") # (1)!
+
+redis_client = Redis(
+ host=redis_values.get("REDIS_HOST"),
+ port=redis_values.get("REDIS_PORT"),
+ password=redis_values.get("REDIS_PASSWORD"),
+ decode_responses=True,
+ socket_timeout=10.0,
+ ssl=True,
+ retry_on_timeout=True,
+)
+
+persistence_layer = RedisCachePersistenceLayer(client=redis_client)
+config = IdempotencyConfig(
+ expires_after_seconds=2 * 60, # 2 minutes
+)
+
+
+@idempotent(config=config, persistence_store=persistence_layer)
+def lambda_handler(event, context):
+ return {"message": "Hello"}
diff --git a/examples/idempotency/src/using_redis_client_with_local_certs.py b/examples/idempotency/src/using_redis_client_with_local_certs.py
new file mode 100644
index 0000000000..cbad1cc92f
--- /dev/null
+++ b/examples/idempotency/src/using_redis_client_with_local_certs.py
@@ -0,0 +1,36 @@
+from typing import Any
+
+from redis import Redis
+
+from aws_lambda_powertools.shared.functions import abs_lambda_path
+from aws_lambda_powertools.utilities import parameters
+from aws_lambda_powertools.utilities.idempotency import IdempotencyConfig, idempotent
+from aws_lambda_powertools.utilities.idempotency.persistence.redis import (
+ RedisCachePersistenceLayer,
+)
+
+redis_values: Any = parameters.get_secret("redis_info", transform="json") # (1)!
+
+
+redis_client = Redis(
+ host=redis_values.get("REDIS_HOST"),
+ port=redis_values.get("REDIS_PORT"),
+ password=redis_values.get("REDIS_PASSWORD"),
+ decode_responses=True,
+ socket_timeout=10.0,
+ ssl=True,
+ retry_on_timeout=True,
+ ssl_certfile=f"{abs_lambda_path()}/certs/redis_user.crt", # (2)!
+ ssl_keyfile=f"{abs_lambda_path()}/certs/redis_user_private.key", # (3)!
+ ssl_ca_certs=f"{abs_lambda_path()}/certs/redis_ca.pem", # (4)!
+)
+
+persistence_layer = RedisCachePersistenceLayer(client=redis_client)
+config = IdempotencyConfig(
+ expires_after_seconds=2 * 60, # 2 minutes
+)
+
+
+@idempotent(config=config, persistence_store=persistence_layer)
+def lambda_handler(event, context):
+ return {"message": "Hello"}
diff --git a/examples/idempotency/templates/cfn_redis_serverless.yaml b/examples/idempotency/templates/cfn_redis_serverless.yaml
new file mode 100644
index 0000000000..9087efce6f
--- /dev/null
+++ b/examples/idempotency/templates/cfn_redis_serverless.yaml
@@ -0,0 +1,13 @@
+AWSTemplateFormatVersion: '2010-09-09'
+
+Resources:
+ RedisServerlessIdempotency:
+ Type: AWS::ElastiCache::ServerlessCache
+ Properties:
+ Engine: redis
+ ServerlessCacheName: redis-cache
+ SecurityGroupIds: # (1)!
+ - security-{your_sg_id}
+ SubnetIds:
+ - subnet-{your_subnet_id_1}
+ - subnet-{your_subnet_id_2}
diff --git a/examples/idempotency/templates/sam_redis_vpc.yaml b/examples/idempotency/templates/sam_redis_vpc.yaml
new file mode 100644
index 0000000000..921b1e75b8
--- /dev/null
+++ b/examples/idempotency/templates/sam_redis_vpc.yaml
@@ -0,0 +1,14 @@
+AWSTemplateFormatVersion: '2010-09-09'
+Transform: AWS::Serverless-2016-10-31
+Resources:
+ HelloWorldFunction:
+ Type: AWS::Serverless::Function
+ Properties:
+ Runtime: python3.11
+      Handler: app.lambda_handler
+ VpcConfig: # (1)!
+ SecurityGroupIds:
+ - security-{your_sg_id}
+ SubnetIds:
+ - subnet-{your_subnet_id_1}
+ - subnet-{your_subnet_id_2}
diff --git a/examples/idempotency/tests/mock_redis.py b/examples/idempotency/tests/mock_redis.py
new file mode 100644
index 0000000000..89b68e4873
--- /dev/null
+++ b/examples/idempotency/tests/mock_redis.py
@@ -0,0 +1,37 @@
+import time as t
+from typing import Dict, Optional
+
+
+# Mock Redis class implementing the operations the idempotency utility relies on
+class MockRedis:
+    def __init__(self, decode_responses: bool = False, cache: Optional[Dict] = None, **kwargs):
+        self.cache = cache or {}
+        self.expire_dict: Dict = {}
+        self.decode_responses = decode_responses
+        self.acl: Dict = {}
+        self.username = ""
+
+    def hset(self, name, mapping):
+        self.expire_dict.pop(name, {})
+        self.cache[name] = mapping
+
+    def from_url(self, url: str):
+        pass
+
+    def expire(self, name, time):
+        self.expire_dict[name] = t.time() + time
+
+    # return None if the key is missing or has expired
+    def get(self, name):
+        if self.expire_dict.get(name, t.time() + 1) < t.time():
+            self.cache.pop(name, {})
+        return self.cache.get(name)
+
+    # mimic SET with optional expiry (ex) and set-if-not-exists (nx) semantics,
+    # which is what the persistence layer calls under the hood
+    def set(self, name, value, ex: Optional[int] = None, nx: bool = False):
+        if nx and self.get(name) is not None:
+            return None
+        self.cache[name] = value
+        if ex is not None:
+            self.expire_dict[name] = t.time() + ex
+        return True
+
+    # return {} if no match
+    def hgetall(self, name):
+        if self.expire_dict.get(name, t.time() + 1) < t.time():
+            self.cache.pop(name, {})
+        return self.cache.get(name, {})
+
+    def get_connection_kwargs(self):
+        return {"decode_responses": self.decode_responses}
+
+    def auth(self, username, **kwargs):
+        self.username = username
+
+    def delete(self, name):
+        self.cache.pop(name, {})
diff --git a/examples/idempotency/tests/test_with_mock_redis.py b/examples/idempotency/tests/test_with_mock_redis.py
new file mode 100644
index 0000000000..6842e818d2
--- /dev/null
+++ b/examples/idempotency/tests/test_with_mock_redis.py
@@ -0,0 +1,48 @@
+from dataclasses import dataclass
+
+import pytest
+from mock_redis import MockRedis
+
+from aws_lambda_powertools.utilities.idempotency import (
+ idempotent,
+)
+from aws_lambda_powertools.utilities.idempotency.persistence.redis import (
+ RedisCachePersistenceLayer,
+)
+from aws_lambda_powertools.utilities.typing import LambdaContext
+
+
+@pytest.fixture
+def lambda_context():
+ @dataclass
+ class LambdaContext:
+ function_name: str = "test"
+ memory_limit_in_mb: int = 128
+ invoked_function_arn: str = "arn:aws:lambda:eu-west-1:809313241:function:test"
+ aws_request_id: str = "52fdfc07-2182-154f-163f-5f0f9a621d72"
+
+ def get_remaining_time_in_millis(self) -> int:
+ return 1000
+
+ return LambdaContext()
+
+
+def test_idempotent_lambda(lambda_context):
+    # Initialize the mock Redis client
+    redis_client = MockRedis(decode_responses=True)
+    # Create the persistence layer backed by the mock Redis client
+    persistence_layer = RedisCachePersistenceLayer(client=redis_client)
+
+    # Set up the idempotent decorator with the Redis persistence layer
+ @idempotent(persistence_store=persistence_layer)
+ def lambda_handler(event: dict, context: LambdaContext):
+ print("expensive operation")
+ return {
+ "payment_id": 12345,
+ "message": "success",
+ "statusCode": 200,
+ }
+
+    # Invoke the simulated Lambda handler
+ result = lambda_handler({"testkey": "testvalue"}, lambda_context)
+ assert result["payment_id"] == 12345
diff --git a/examples/idempotency/tests/test_with_real_redis.py b/examples/idempotency/tests/test_with_real_redis.py
new file mode 100644
index 0000000000..a465592eb0
--- /dev/null
+++ b/examples/idempotency/tests/test_with_real_redis.py
@@ -0,0 +1,59 @@
+from dataclasses import dataclass
+
+import pytest
+import redis
+
+from aws_lambda_powertools.utilities.idempotency import (
+ idempotent,
+)
+from aws_lambda_powertools.utilities.idempotency.persistence.redis import (
+ RedisCachePersistenceLayer,
+)
+from aws_lambda_powertools.utilities.typing import LambdaContext
+
+
+@pytest.fixture
+def lambda_context():
+ @dataclass
+ class LambdaContext:
+ function_name: str = "test"
+ memory_limit_in_mb: int = 128
+ invoked_function_arn: str = "arn:aws:lambda:eu-west-1:809313241:function:test"
+ aws_request_id: str = "52fdfc07-2182-154f-163f-5f0f9a621d72"
+
+ def get_remaining_time_in_millis(self) -> int:
+ return 1000
+
+ return LambdaContext()
+
+
+@pytest.fixture
+def persistence_store_standalone_redis():
+    # Initialize a real Redis client, connecting to the port exposed in the Makefile
+    redis_client = redis.Redis(
+        host="localhost",
+        port=63005,
+        decode_responses=True,
+    )
+
+ # return a persistence layer with real Redis
+ return RedisCachePersistenceLayer(client=redis_client)
+
+
+def test_idempotent_lambda(lambda_context, persistence_store_standalone_redis):
+ # Establish persistence layer using the real redis client
+ persistence_layer = persistence_store_standalone_redis
+
+    # Set up the idempotent decorator with the Redis persistence layer
+ @idempotent(persistence_store=persistence_layer)
+ def lambda_handler(event: dict, context: LambdaContext):
+ print("expensive operation")
+ return {
+ "payment_id": 12345,
+ "message": "success",
+ "statusCode": 200,
+ }
+
+    # Invoke the simulated Lambda handler
+ result = lambda_handler({"testkey": "testvalue"}, lambda_context)
+ assert result["payment_id"] == 12345
diff --git a/poetry.lock b/poetry.lock
index fde99b440a..040fb4cc94 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -22,15 +22,29 @@ doc = ["Sphinx", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-
test = ["anyio[trio]", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
trio = ["trio (<0.22)"]
+[[package]]
+name = "async-timeout"
+version = "4.0.3"
+description = "Timeout context manager for asyncio programs"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"},
+ {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"},
+]
+
+[package.dependencies]
+typing-extensions = {version = ">=3.6.5", markers = "python_version < \"3.8\""}
+
[[package]]
name = "attrs"
-version = "23.1.0"
+version = "23.2.0"
description = "Classes Without Boilerplate"
optional = false
python-versions = ">=3.7"
files = [
- {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"},
- {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"},
+ {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"},
+ {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"},
]
[package.dependencies]
@@ -38,10 +52,11 @@ importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
[package.extras]
cov = ["attrs[tests]", "coverage[toml] (>=5.3)"]
-dev = ["attrs[docs,tests]", "pre-commit"]
+dev = ["attrs[tests]", "pre-commit"]
docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"]
tests = ["attrs[tests-no-zope]", "zope-interface"]
-tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"]
+tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"]
[[package]]
name = "aws-cdk-asset-awscli-v1"
@@ -235,18 +250,17 @@ wrapt = "*"
[[package]]
name = "babel"
-version = "2.13.1"
+version = "2.14.0"
description = "Internationalization utilities"
optional = false
python-versions = ">=3.7"
files = [
- {file = "Babel-2.13.1-py3-none-any.whl", hash = "sha256:7077a4984b02b6727ac10f1f7294484f737443d7e2e66c5e4380e41a3ae0b4ed"},
- {file = "Babel-2.13.1.tar.gz", hash = "sha256:33e0952d7dd6374af8dbf6768cc4ddf3ccfefc244f9986d4074704f2fbd18900"},
+ {file = "Babel-2.14.0-py3-none-any.whl", hash = "sha256:efb1a25b7118e67ce3a259bed20545c29cb68be8ad2c784c83689981b7a57287"},
+ {file = "Babel-2.14.0.tar.gz", hash = "sha256:6919867db036398ba21eb5c7a0f6b28ab8cbc3ae7a73a44ebe34ae74a4e7d363"},
]
[package.dependencies]
pytz = {version = ">=2015.7", markers = "python_version < \"3.9\""}
-setuptools = {version = "*", markers = "python_version >= \"3.12\""}
[package.extras]
dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"]
@@ -326,17 +340,17 @@ uvloop = ["uvloop (>=0.15.2)"]
[[package]]
name = "boto3"
-version = "1.33.8"
+version = "1.33.13"
description = "The AWS SDK for Python"
optional = false
python-versions = ">= 3.7"
files = [
- {file = "boto3-1.33.8-py3-none-any.whl", hash = "sha256:b8c818125489fc0371ef28d806d36d8f1dcb71734fcb0d96b3201563e3e86f22"},
- {file = "boto3-1.33.8.tar.gz", hash = "sha256:d02a084b25aa8d46ef917b128e90877efab1ba45f9d1ba3a11f336930378e350"},
+ {file = "boto3-1.33.13-py3-none-any.whl", hash = "sha256:5f278b95fb2b32f3d09d950759a05664357ba35d81107bab1537c4ddd212cd8c"},
+ {file = "boto3-1.33.13.tar.gz", hash = "sha256:0e966b8a475ecb06cc0846304454b8da2473d4c8198a45dfb2c5304871986883"},
]
[package.dependencies]
-botocore = ">=1.33.8,<1.34.0"
+botocore = ">=1.33.13,<1.34.0"
jmespath = ">=0.7.1,<2.0.0"
s3transfer = ">=0.8.2,<0.9.0"
@@ -345,13 +359,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"]
[[package]]
name = "botocore"
-version = "1.33.8"
+version = "1.33.13"
description = "Low-level, data-driven core of boto 3."
optional = false
python-versions = ">= 3.7"
files = [
- {file = "botocore-1.33.8-py3-none-any.whl", hash = "sha256:90236e6e69d7e80875d7f9d39383630706edbc1298026698c6c70d9b6a65576e"},
- {file = "botocore-1.33.8.tar.gz", hash = "sha256:e6970bf89cbe2624399aeffce52c253917d8e5a1c671de4054557603ab56c922"},
+ {file = "botocore-1.33.13-py3-none-any.whl", hash = "sha256:aeadccf4b7c674c7d47e713ef34671b834bc3e89723ef96d994409c9f54666e6"},
+ {file = "botocore-1.33.13.tar.gz", hash = "sha256:fb577f4cb175605527458b04571451db1bd1a2036976b626206036acd4496617"},
]
[package.dependencies]
@@ -430,7 +444,7 @@ files = [
name = "cffi"
version = "1.15.1"
description = "Foreign Function Interface for Python calling C code."
-optional = true
+optional = false
python-versions = "*"
files = [
{file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"},
@@ -756,7 +770,7 @@ toml = ["tomli"]
name = "cryptography"
version = "41.0.7"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
-optional = true
+optional = false
python-versions = ">=3.7"
files = [
{file = "cryptography-41.0.7-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:3c78451b78313fa81607fa1b3f1ae0a5ddd8014c38a02d9db0616133987b9cdf"},
@@ -853,71 +867,71 @@ six = "*"
[[package]]
name = "ddtrace"
-version = "2.3.1"
+version = "2.4.0"
description = "Datadog APM client library"
optional = false
python-versions = ">=3.7"
files = [
- {file = "ddtrace-2.3.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:556a046413024cf53ebb0256bbf957692a5e417599e04dac5793e659d08c398c"},
- {file = "ddtrace-2.3.1-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:6066f1deddb454b8e098e5a0eb53ab36d81344209fdf6bec94767358da190294"},
- {file = "ddtrace-2.3.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb2b950901845b966a7805ff49a9ad58dcd5e9c27b5b804079977a1309c5b4fb"},
- {file = "ddtrace-2.3.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:05b0da47bc98a9802faa2557e83c096868c4ef249c3d9a43f8e5daf91d1c8e4f"},
- {file = "ddtrace-2.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0183c5178112604eb012653fd17d0947e6e2f17325f93b1e32cc6af05ceffd0"},
- {file = "ddtrace-2.3.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:462eb671cd78780af6e42b43f2bc451537a0d283db054c175348e9b3a1fcaff4"},
- {file = "ddtrace-2.3.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:9b5284786a0912a9739665a33760f561423524e2d250c0b0bb2dedf6edba2da5"},
- {file = "ddtrace-2.3.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:dbfb1ade5725a63f21945ab8234e64e46645e98a7deb4342eddf6e86d0f9145c"},
- {file = "ddtrace-2.3.1-cp310-cp310-win32.whl", hash = "sha256:1f51732c4181e5b671a5ae3c6c786ce3b9fd2abacad2d4249d53a55564906902"},
- {file = "ddtrace-2.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f0ae5814fbb51b4aba4d4f4b5c1fd2110790b04d4141cf4a03291566d1d5b0f"},
- {file = "ddtrace-2.3.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:24f4df55fd952182efe6815748db4675540f6fb674d9838dfa680dec1fdd176f"},
- {file = "ddtrace-2.3.1-cp311-cp311-macosx_11_0_x86_64.whl", hash = "sha256:04b4e476f78389021b50b3ae5c4d494bbbd033a300e93253fe1f873a67611436"},
- {file = "ddtrace-2.3.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:711978dd14c0aca7eaf90587b8608c891b82e1767fc6f2be7d82b67d56c8d580"},
- {file = "ddtrace-2.3.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfa6b1b2698029b7b1f8cc351869397c33bff996159660a00ca254d9fcc5b78d"},
- {file = "ddtrace-2.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dd7295921009ccc61f5325cc3d30fc6182396fc8e598975b372bdf94fd16077"},
- {file = "ddtrace-2.3.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:94aa6a2e16d05cbb2d7a9a7553ca9b638e5b200e0d80fd027179e6af0faf59a2"},
- {file = "ddtrace-2.3.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:62f67040ef16149a46c8506d92a2824d7ded39427a51947a3651d572bb7a379f"},
- {file = "ddtrace-2.3.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:02622c4b8d5497f6367d9ccad38ac8c59d46fc3373034be114474fb01b1a28e6"},
- {file = "ddtrace-2.3.1-cp311-cp311-win32.whl", hash = "sha256:1d13ec5393802a619f922fb37a9f534911f44554bd0434dfd2d8db4e8897649e"},
- {file = "ddtrace-2.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:36b3427136f61d499f3fd307f97ae168a4d2728887e1922204e509a5aa72a4a3"},
- {file = "ddtrace-2.3.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:ac47d141e03c8bea3953fc5f51ac284de9ff4e6325faf2554b003ac906bc4da8"},
- {file = "ddtrace-2.3.1-cp312-cp312-macosx_11_0_x86_64.whl", hash = "sha256:dd23e10b4cac1cf26e64d4d1ec1d6e173e609a207f5520469326f5cff6c7e462"},
- {file = "ddtrace-2.3.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a31cddf750d7a28c886c194624c6be5a4475de064489002df898731f27f3d16"},
- {file = "ddtrace-2.3.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3dedd8097e58519f47f8908fe684f37c8f9722ce4b0614de78d9f39b83621dc7"},
- {file = "ddtrace-2.3.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:367aed800b78fb4d2af332c44d07d7126b1dbf758af422299f9a177811ec723d"},
- {file = "ddtrace-2.3.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ca4dea67facdeba44040d9af8eeff96fb9a35a2b1cff93255e33a4d7250881b9"},
- {file = "ddtrace-2.3.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:a661e133d451416741c6c2ad96baa417a1267204975bfb0d247cab748ecc3ed1"},
- {file = "ddtrace-2.3.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:556f60d6c9bbfc2da6d7f6751625fa3ae597c26bb8bbe74953db0d2d74f93b04"},
- {file = "ddtrace-2.3.1-cp312-cp312-win32.whl", hash = "sha256:261e20b9e9a363ec2dc728f8a009a2b1d3c9de4fbe07438b5600902a285bb179"},
- {file = "ddtrace-2.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:119be400024efff2f0eb66216b2aa3d2a700cd9b4a07605f7f9c94eb5e4b4cb5"},
- {file = "ddtrace-2.3.1-cp37-cp37m-macosx_11_0_x86_64.whl", hash = "sha256:a66d0e0cccfa2fb207fc9a4d9ca6ab235a768f130129d6bb1dd256b7b3d34305"},
- {file = "ddtrace-2.3.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c465e43b96380f09e1e8f2d0f9cb3b78b4ef2bb211f25b57c925bb79f53cb00c"},
- {file = "ddtrace-2.3.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c3eaaf8c5b63e07533822425b3402552c75adf091a1f0a6bf949725fa610c779"},
- {file = "ddtrace-2.3.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:600551ecd232df060203714dc1acba4809e9194fc91a7c638b68c548e92af171"},
- {file = "ddtrace-2.3.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:837232d708956a5d595a3618641c188a5844d663e0f77b1461f20c83f74a21c0"},
- {file = "ddtrace-2.3.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:ddf3043581e2424fc3d4271ee00a038651a4ec9d2610eeaa2d6645095c9f4960"},
- {file = "ddtrace-2.3.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:63c6b28096e273431da923a8dfc0f54f7d472c3c78f0a5c4c99ed7e210b9c855"},
- {file = "ddtrace-2.3.1-cp37-cp37m-win32.whl", hash = "sha256:8b09a42cc975f798bfda9b8d8bf5c8c813022bfcf48b9e0e5e90caf4cf33ee8f"},
- {file = "ddtrace-2.3.1-cp37-cp37m-win_amd64.whl", hash = "sha256:66b49153c85423d5e99b1f364cc3b4a3ffedf35be0f3eb840f3bacd7c58100e8"},
- {file = "ddtrace-2.3.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:81f0bd1d50c8fc7d8a96e38f746ca4421fa3b52991f0df44e5e9faeb5a934c2b"},
- {file = "ddtrace-2.3.1-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:37d600d582a5046f82cf77ae9247cf15cf62cf23c15739c5f23c30db2aa092c9"},
- {file = "ddtrace-2.3.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:60a62cfa22695cb1392c617910fb389c7240fa9dae0b5792bd87ff3ae82d2c45"},
- {file = "ddtrace-2.3.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1bdf55fa4a842f9786ca30434b31bf6f877e95af86b6fb7a5a540ce592f566b7"},
- {file = "ddtrace-2.3.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63032c6a76173cab03c021e65c1997a12c0c571263caf00ec18b82c2293c49be"},
- {file = "ddtrace-2.3.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:516b830e52bc8ac2988f11a06a6c6a5296f73b119e99e8ee55a34e531389acea"},
- {file = "ddtrace-2.3.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:86e7764759043439c3f672f998f60bb9118fc4a6d7f603c762b125471b17f549"},
- {file = "ddtrace-2.3.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:87ae203dd8fa3e04f8855786ae4b2f103bc66c9f2368ee2b4e620bccdde9b34d"},
- {file = "ddtrace-2.3.1-cp38-cp38-win32.whl", hash = "sha256:f42fa2fa6f2cd9e3673a3bd7469439f5bea0ee86456706db1b50dc20b10682a6"},
- {file = "ddtrace-2.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:2a3ad8e53c45c3329f939fe921714dfe76f5737e48f5b37a5422b1573a20ce44"},
- {file = "ddtrace-2.3.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:5adff6a5d60239e64062ad5efb72631c47c7fb8310ebea6d817f0208a7585074"},
- {file = "ddtrace-2.3.1-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:84012bc7d27dd3c4cd591bbaf0a0cc0413ebc6c838637ca5a76bacb354e2518f"},
- {file = "ddtrace-2.3.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cc2596b26701c9e3a362195f79ddcf54b491a8ea13277ed16697da9ad943646"},
- {file = "ddtrace-2.3.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:986113f7eb4d8a8e87216b55e6cc40b578f84a5730241822af3f34cc61e42710"},
- {file = "ddtrace-2.3.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd0cdbc6d81e556b6af0875b0bb2ac77d3cf0a0c5da8faa014da1936e1e0adc2"},
- {file = "ddtrace-2.3.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9e8eb17ef8ca2fc9464216290969cff3bbf8df00860ebb219328804125b43bd1"},
- {file = "ddtrace-2.3.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:fc21e46c5e9d077022b7634ae247d15d2318cbb347f6756607dfd64ff5941797"},
- {file = "ddtrace-2.3.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:51bf7e3e5c80ef0daadd22c26e7c24c90fc4b4a7662dec1a3d9d8e0db68f3c09"},
- {file = "ddtrace-2.3.1-cp39-cp39-win32.whl", hash = "sha256:2a5f040c0eb101f82a9cd8b8b0279e8583bb0a62fd39b879197d53b71a5d6dbe"},
- {file = "ddtrace-2.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:ff708683becb18771cb31ae5fb5d1430ac5031a082106e0dabac46a1fd6f832e"},
- {file = "ddtrace-2.3.1.tar.gz", hash = "sha256:273a0e98f93e7231708b30067768d80df9bc93a505de93500f30c6da24b70a7b"},
+ {file = "ddtrace-2.4.0-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:90641de597d3424573aa96263509800bb64018727bf74e29e250e6d21200a4be"},
+ {file = "ddtrace-2.4.0-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:75b7d01af5fb8d279a2edb56d48af0dc221ed43f4e5049387e4a9be529217033"},
+ {file = "ddtrace-2.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f104933ffbae735887e10e3e0d9a5d28dd7d42d1fd86141c4fa171c07598b561"},
+ {file = "ddtrace-2.4.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d675545d2fd7c5be10fe704a3f151add0ce8b101c976ca0ab452699aac0d8489"},
+ {file = "ddtrace-2.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98b380dabf377a318ebd909423293b02beaa43ffda03ad129a5a93c4a1a4b5c6"},
+ {file = "ddtrace-2.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d2f93337c1546404967525388a45174481daa72ecf7d3a1e4c21349e1a2d572c"},
+ {file = "ddtrace-2.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:0e345e034e8962d76642ab2763f5bdb1bc4424c2ea17d9ca5f82e093160d6ca1"},
+ {file = "ddtrace-2.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1aa5e1a7121d08d50795e3f6218f3959cfa55363a3896210410ef354a7573de9"},
+ {file = "ddtrace-2.4.0-cp310-cp310-win32.whl", hash = "sha256:d9c69a42919a27cff8d42461b301014d79683c40f60d0cb5f3000e4ff7cb907f"},
+ {file = "ddtrace-2.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:962de6a60f42e2cde1823c47a3383bb0d6beaa954d57b12687688935d0ddd3d3"},
+ {file = "ddtrace-2.4.0-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:ed91c32353c8288fb95de67faa341c5ab9a089c0161ad51fc739f0db2b46866e"},
+ {file = "ddtrace-2.4.0-cp311-cp311-macosx_11_0_x86_64.whl", hash = "sha256:410c9b9241ed2514dc9413887d852140cc7ff396b40ffc412835a14668b9b1a3"},
+ {file = "ddtrace-2.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:639b11f780d0ed1a372a2a6b92cc1b9c586a0fea27439557e768d5ebedabbc34"},
+ {file = "ddtrace-2.4.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:08861e4acd61198428f0d994db1bc5d2893ec816b9cd78c0c6d1fc963f0dc771"},
+ {file = "ddtrace-2.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aad627a4611bff8f527e2c0c0fc51be9d74a563328269f53b871901570ee4ff3"},
+ {file = "ddtrace-2.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e6ae2f75f2edc068d6c104ceb0e882a6dfad8f702b27384b3dac5290aebbc248"},
+ {file = "ddtrace-2.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:82a0832000fedcb95856477bab95c6f151fa28ede3aceafaabe7c08beffaa577"},
+ {file = "ddtrace-2.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f8b1baac10f9cc3803854f802062e02ae5de0d5546f19165c3b6e8840e9b09f4"},
+ {file = "ddtrace-2.4.0-cp311-cp311-win32.whl", hash = "sha256:c687fe20b17e2d24de222913dc2383e6b1462641d8ff18d27678dcb72ced82a3"},
+ {file = "ddtrace-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:47296b116a97e01fe6bf48a4eea4e825212ee23288ee064964ab87ba608fc038"},
+ {file = "ddtrace-2.4.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:6e2b2b4160ea53dd3e4f8bb35af7124a5e8954c8badffa81468c8a62d12acc51"},
+ {file = "ddtrace-2.4.0-cp312-cp312-macosx_11_0_x86_64.whl", hash = "sha256:49ac0d69f98a4ff2175db39481598300fd94f038a027b537d0a66d9dbeca1ed7"},
+ {file = "ddtrace-2.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2604e1c91b44d3b6fb15d0337cda1ac2c15aec215f6a44e1bb39d25b47c2633c"},
+ {file = "ddtrace-2.4.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb7d2c846e3d7e8156199855d4db014a71d62daedba84a213416e2a488e834b3"},
+ {file = "ddtrace-2.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85774e12d5d92152cd1c64f3a8a2f4dbe7f3d39201f8a8ff5e914b9639fe6e17"},
+ {file = "ddtrace-2.4.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:418c0c990c505accc8665bfc056f4297938a54176157bf1f0765f2fae584efec"},
+ {file = "ddtrace-2.4.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:183f7c3ddd9a2891bd1b6f5ea3af6d16517775268b3940259820ca3c83292d16"},
+ {file = "ddtrace-2.4.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:eb90e71b70e3ea6c24711cfb5c48c711a2175c315daf07f4f28903aa773a48b7"},
+ {file = "ddtrace-2.4.0-cp312-cp312-win32.whl", hash = "sha256:5eab75f1d4170c41de1f9c32e7e39714b2dd11a59d9ff7e94a199b88fa813ecd"},
+ {file = "ddtrace-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:d892e0b71f3b6bcf31920b5e7fd699c86aea734bc02eec3c1b22acd8f63057e4"},
+ {file = "ddtrace-2.4.0-cp37-cp37m-macosx_11_0_x86_64.whl", hash = "sha256:c07ea7a17a2897d891ee5e95de3b0e4f57184c471e87ffcc7208b3ccd68b9fcc"},
+ {file = "ddtrace-2.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c05b28815e65d6361cd056c877ab051e132a6929b0d353313a499122e6522ea3"},
+ {file = "ddtrace-2.4.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:63719bfc8fe5e8510022a3275145d6b2b1c4f955c395698fb792d99d4cda698d"},
+ {file = "ddtrace-2.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:190f96eccdd8107cc93db6e79af4b8fc9403418c823d895af898cf635f5cada6"},
+ {file = "ddtrace-2.4.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b0fdb6a2fe0eadd122df4ea3a11690cb88f4f642bd19b1a21d01e9dcfd6eb20c"},
+ {file = "ddtrace-2.4.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1b2bf18ee10ea8fe668096a6c70db4161e228edee161b04719506947d7117937"},
+ {file = "ddtrace-2.4.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ca5fa396b8df0d7b55ad9e8d5b19be09c5dedefa388bf7590340ace5ce392e14"},
+ {file = "ddtrace-2.4.0-cp37-cp37m-win32.whl", hash = "sha256:c67a4d8767aa269f8dfab79ae39b8170b95de6813bd1cba17dc951f0a1ee462b"},
+ {file = "ddtrace-2.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1db7931541052622a91c8c6594b274d96efe956d5dbbe09c57a50c0f74640b52"},
+ {file = "ddtrace-2.4.0-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:a8b6ab9f26d2ea50dfa69a282d727c865461f0c1b535f973922072f700cde031"},
+ {file = "ddtrace-2.4.0-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:9ad7aa89988b77b893c3e9898fc48e3cef9471bc2648d6a83cc800b49cad1f1f"},
+ {file = "ddtrace-2.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38b95920bcc17289a0e3871830ef19030df763039021a796a1debb7fd4ea347b"},
+ {file = "ddtrace-2.4.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9152dcc4b8a98392ce5853b8e160f8d215ddd148337d42861ab3c12635b32b75"},
+ {file = "ddtrace-2.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c335be0ab8f4f376f51111219a9d85bcdbd6d75c18a8d5471817645bed1430c0"},
+ {file = "ddtrace-2.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0c95339694034d4fbf9e1b2a0918f99b3936336e8deb4d513e9cf7a6ae1532f3"},
+ {file = "ddtrace-2.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:f8bddc5e84e50663b64fbad2e2c61203484dea06de7759a47f096514d99f5c8f"},
+ {file = "ddtrace-2.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0af7c4c94959481bc4060c7dfb5f7e70b1929b18089c7ea0329fc3f28707fd8a"},
+ {file = "ddtrace-2.4.0-cp38-cp38-win32.whl", hash = "sha256:de3fcca4747340c835e7816009dd363d4e02dc5fc25365b2418dc3d986a6550a"},
+ {file = "ddtrace-2.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:2f3dbcff2b305d34ecc63db05d0efeb923846ba07871be6f0a3509a33290fb69"},
+ {file = "ddtrace-2.4.0-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:7b43e2e890e868a133afc25f57774bb6bc8ae8841094cba4e8f2b3ee50f9c7ee"},
+ {file = "ddtrace-2.4.0-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:de66ea03ca5b3f02d0f878fc9d486d4d4f654cf66b38d3fdf73bf314fc0e3f5b"},
+ {file = "ddtrace-2.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01cba8d20d4754135411e0e3398af02bc29b3c5f3dc85b1ee8cdfb9a0532f793"},
+ {file = "ddtrace-2.4.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cb324809582b65baa682f045cb2873d686de3aa93cac75718462d0a23f980836"},
+ {file = "ddtrace-2.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f475ea4e2086e6a16a48568688918b21043ba391a6f968cb9bc17ec70d51de75"},
+ {file = "ddtrace-2.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f1d4a5d9c89db2cc0e4a6eaf10b6d1af449d1ef14060000b23eceee19497705e"},
+ {file = "ddtrace-2.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a057db38d52271b6206bac2ab23f2a36cbe547397cba1ce586021df711570559"},
+ {file = "ddtrace-2.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:45ee78018276764f7fdaf1cf3b945660cf1ab39e1a03e0c61bf1984a71562204"},
+ {file = "ddtrace-2.4.0-cp39-cp39-win32.whl", hash = "sha256:4f63dea207c90bb2c2d52ff9de0ee71b27aedb5d8540745e4e0b38a896737de0"},
+ {file = "ddtrace-2.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:e3523c71d37fb3135d0817e92b486bcee7829c41e5465ed41b080286d7e2739d"},
+ {file = "ddtrace-2.4.0.tar.gz", hash = "sha256:fb1bab23debb3a1fb71e3d6a1ce9818bc5e6ad9b885b901f78f3f28639393ecb"},
]
[package.dependencies]
@@ -968,15 +982,50 @@ wrapt = ">=1.10,<2"
[package.extras]
dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"]
+[[package]]
+name = "deprecation"
+version = "2.1.0"
+description = "A library to handle automated deprecations"
+optional = false
+python-versions = "*"
+files = [
+ {file = "deprecation-2.1.0-py2.py3-none-any.whl", hash = "sha256:a10811591210e1fb0e768a8c25517cabeabcba6f0bf96564f8ff45189f90b14a"},
+ {file = "deprecation-2.1.0.tar.gz", hash = "sha256:72b3bde64e5d778694b0cf68178aed03d15e15477116add3fb773e581f9518ff"},
+]
+
+[package.dependencies]
+packaging = "*"
+
+[[package]]
+name = "docker"
+version = "6.1.3"
+description = "A Python library for the Docker Engine API."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "docker-6.1.3-py3-none-any.whl", hash = "sha256:aecd2277b8bf8e506e484f6ab7aec39abe0038e29fa4a6d3ba86c3fe01844ed9"},
+ {file = "docker-6.1.3.tar.gz", hash = "sha256:aa6d17830045ba5ef0168d5eaa34d37beeb113948c413affe1d5991fc11f9a20"},
+]
+
+[package.dependencies]
+packaging = ">=14.0"
+pywin32 = {version = ">=304", markers = "sys_platform == \"win32\""}
+requests = ">=2.26.0"
+urllib3 = ">=1.26.0"
+websocket-client = ">=0.32.0"
+
+[package.extras]
+ssh = ["paramiko (>=2.4.3)"]
+
[[package]]
name = "envier"
-version = "0.4.0"
+version = "0.5.0"
description = "Python application configuration via the environment"
optional = false
-python-versions = ">=2.7"
+python-versions = ">=3.7"
files = [
- {file = "envier-0.4.0-py3-none-any.whl", hash = "sha256:7b91af0f16ea3e56d91ec082f038987e81b441fc19c657a8b8afe0909740a706"},
- {file = "envier-0.4.0.tar.gz", hash = "sha256:e68dcd1ed67d8b6313883e27dff3e701b7fba944d2ed4b7f53d0cc2e12364a82"},
+ {file = "envier-0.5.0-py3-none-any.whl", hash = "sha256:5fed6099ee5d7ad4cf664f8bb99d1281d4ab5fadeec8f40ba9458610938293be"},
+ {file = "envier-0.5.0.tar.gz", hash = "sha256:f35ca8605f0c70c2c0367133af9dc1ef16710021dbd0e28c1b0a83070db06768"},
]
[package.extras]
@@ -1201,17 +1250,6 @@ files = [
{file = "ijson-3.2.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4a3a6a2fbbe7550ffe52d151cf76065e6b89cfb3e9d0463e49a7e322a25d0426"},
{file = "ijson-3.2.3-cp311-cp311-win32.whl", hash = "sha256:6a4db2f7fb9acfb855c9ae1aae602e4648dd1f88804a0d5cfb78c3639bcf156c"},
{file = "ijson-3.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:ccd6be56335cbb845f3d3021b1766299c056c70c4c9165fb2fbe2d62258bae3f"},
- {file = "ijson-3.2.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:055b71bbc37af5c3c5861afe789e15211d2d3d06ac51ee5a647adf4def19c0ea"},
- {file = "ijson-3.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c075a547de32f265a5dd139ab2035900fef6653951628862e5cdce0d101af557"},
- {file = "ijson-3.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:457f8a5fc559478ac6b06b6d37ebacb4811f8c5156e997f0d87d708b0d8ab2ae"},
- {file = "ijson-3.2.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9788f0c915351f41f0e69ec2618b81ebfcf9f13d9d67c6d404c7f5afda3e4afb"},
- {file = "ijson-3.2.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fa234ab7a6a33ed51494d9d2197fb96296f9217ecae57f5551a55589091e7853"},
- {file = "ijson-3.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdd0dc5da4f9dc6d12ab6e8e0c57d8b41d3c8f9ceed31a99dae7b2baf9ea769a"},
- {file = "ijson-3.2.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c6beb80df19713e39e68dc5c337b5c76d36ccf69c30b79034634e5e4c14d6904"},
- {file = "ijson-3.2.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:a2973ce57afb142d96f35a14e9cfec08308ef178a2c76b8b5e1e98f3960438bf"},
- {file = "ijson-3.2.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:105c314fd624e81ed20f925271ec506523b8dd236589ab6c0208b8707d652a0e"},
- {file = "ijson-3.2.3-cp312-cp312-win32.whl", hash = "sha256:ac44781de5e901ce8339352bb5594fcb3b94ced315a34dbe840b4cff3450e23b"},
- {file = "ijson-3.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:0567e8c833825b119e74e10a7c29761dc65fcd155f5d4cb10f9d3b8916ef9912"},
{file = "ijson-3.2.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:eeb286639649fb6bed37997a5e30eefcacddac79476d24128348ec890b2a0ccb"},
{file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:396338a655fb9af4ac59dd09c189885b51fa0eefc84d35408662031023c110d1"},
{file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e0243d166d11a2a47c17c7e885debf3b19ed136be2af1f5d1c34212850236ac"},
@@ -1588,16 +1626,6 @@ files = [
{file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"},
{file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"},
{file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"},
- {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"},
- {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"},
- {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"},
- {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"},
- {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"},
- {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"},
- {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"},
- {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"},
- {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"},
- {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"},
{file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"},
{file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"},
{file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"},
@@ -2004,13 +2032,13 @@ test = ["codecov (>=2.1)", "pytest (>=6.2)", "pytest-cov (>=2.12)"]
[[package]]
name = "opentelemetry-api"
-version = "1.21.0"
+version = "1.22.0"
description = "OpenTelemetry Python API"
optional = false
python-versions = ">=3.7"
files = [
- {file = "opentelemetry_api-1.21.0-py3-none-any.whl", hash = "sha256:4bb86b28627b7e41098f0e93280fe4892a1abed1b79a19aec6f928f39b17dffb"},
- {file = "opentelemetry_api-1.21.0.tar.gz", hash = "sha256:d6185fd5043e000075d921822fd2d26b953eba8ca21b1e2fa360dd46a7686316"},
+ {file = "opentelemetry_api-1.22.0-py3-none-any.whl", hash = "sha256:43621514301a7e9f5d06dd8013a1b450f30c2e9372b8e30aaeb4562abf2ce034"},
+ {file = "opentelemetry_api-1.22.0.tar.gz", hash = "sha256:15ae4ca925ecf9cfdfb7a709250846fbb08072260fca08ade78056c502b86bed"},
]
[package.dependencies]
@@ -2170,7 +2198,7 @@ files = [
name = "pycparser"
version = "2.21"
description = "C parser in Python"
-optional = true
+optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
files = [
{file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"},
@@ -2465,6 +2493,29 @@ files = [
{file = "pytz-2023.3.post1.tar.gz", hash = "sha256:7b4fddbeb94a1eba4b557da24f19fdf9db575192544270a9101d8509f9f43d7b"},
]
+[[package]]
+name = "pywin32"
+version = "306"
+description = "Python for Window Extensions"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"},
+ {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"},
+ {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"},
+ {file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"},
+ {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"},
+ {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"},
+ {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"},
+ {file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"},
+ {file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"},
+ {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"},
+ {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"},
+ {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"},
+ {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"},
+ {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"},
+]
+
[[package]]
name = "pyyaml"
version = "6.0.1"
@@ -2477,7 +2528,6 @@ files = [
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
- {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"},
{file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
{file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
{file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
@@ -2485,15 +2535,8 @@ files = [
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
- {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"},
{file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
- {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
- {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
- {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
- {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
- {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
- {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"},
{file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"},
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"},
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"},
@@ -2510,7 +2553,6 @@ files = [
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
- {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"},
{file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
{file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
{file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
@@ -2518,7 +2560,6 @@ files = [
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
- {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"},
{file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
{file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
{file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
@@ -2556,6 +2597,26 @@ mando = ">=0.6,<0.8"
[package.extras]
toml = ["tomli (>=2.0.1)"]
+[[package]]
+name = "redis"
+version = "4.6.0"
+description = "Python client for Redis database and key-value store"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "redis-4.6.0-py3-none-any.whl", hash = "sha256:e2b03db868160ee4591de3cb90d40ebb50a90dd302138775937f6a42b7ed183c"},
+ {file = "redis-4.6.0.tar.gz", hash = "sha256:585dc516b9eb042a619ef0a39c3d7d55fe81bdb4df09a52c9cdde0d07bf1aa7d"},
+]
+
+[package.dependencies]
+async-timeout = {version = ">=4.0.2", markers = "python_full_version <= \"3.11.2\""}
+importlib-metadata = {version = ">=1.0", markers = "python_version < \"3.8\""}
+typing-extensions = {version = "*", markers = "python_version < \"3.8\""}
+
+[package.extras]
+hiredis = ["hiredis (>=1.0.0)"]
+ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==20.0.1)", "requests (>=2.26.0)"]
+
[[package]]
name = "regex"
version = "2022.10.31"
@@ -2811,13 +2872,13 @@ tornado = ["tornado (>=5)"]
[[package]]
name = "setuptools"
-version = "69.0.2"
+version = "69.0.3"
description = "Easily download, build, install, upgrade, and uninstall Python packages"
optional = false
python-versions = ">=3.8"
files = [
- {file = "setuptools-69.0.2-py3-none-any.whl", hash = "sha256:1e8fdff6797d3865f37397be788a4e3cba233608e9b509382a2777d25ebde7f2"},
- {file = "setuptools-69.0.2.tar.gz", hash = "sha256:735896e78a4742605974de002ac60562d286fa8051a7e2299445e8e8fbb01aa6"},
+ {file = "setuptools-69.0.3-py3-none-any.whl", hash = "sha256:385eb4edd9c9d5c17540511303e39a147ce2fc04bc55289c322b9e5904fe2c05"},
+ {file = "setuptools-69.0.3.tar.gz", hash = "sha256:be1af57fc409f93647f2e8e4573a142ed38724b8cdd389706a867bb4efcf1e78"},
]
[package.extras]
@@ -2887,6 +2948,40 @@ files = [
[package.dependencies]
mpmath = ">=0.19"
+[[package]]
+name = "testcontainers"
+version = "3.7.1"
+description = "Library provides lightweight, throwaway instances of common databases, Selenium web browsers, or anything else that can run in a Docker container"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "testcontainers-3.7.1-py2.py3-none-any.whl", hash = "sha256:7f48cef4bf0ccd78f1a4534d4b701a003a3bace851f24eae58a32f9e3f0aeba0"},
+]
+
+[package.dependencies]
+deprecation = "*"
+docker = ">=4.0.0"
+redis = {version = "*", optional = true, markers = "extra == \"redis\""}
+wrapt = "*"
+
+[package.extras]
+arangodb = ["python-arango"]
+azurite = ["azure-storage-blob"]
+clickhouse = ["clickhouse-driver"]
+docker-compose = ["docker-compose"]
+google-cloud-pubsub = ["google-cloud-pubsub (<2)"]
+kafka = ["kafka-python"]
+keycloak = ["python-keycloak"]
+mongo = ["pymongo"]
+mssqlserver = ["pymssql"]
+mysql = ["pymysql", "sqlalchemy"]
+neo4j = ["neo4j"]
+oracle = ["cx-Oracle", "sqlalchemy"]
+postgresql = ["psycopg2-binary", "sqlalchemy"]
+rabbitmq = ["pika"]
+redis = ["redis"]
+selenium = ["selenium"]
+
[[package]]
name = "tomli"
version = "2.0.1"
@@ -2963,6 +3058,20 @@ files = [
doc = ["sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
test = ["mypy", "pytest", "typing-extensions"]
+[[package]]
+name = "types-pyopenssl"
+version = "23.3.0.0"
+description = "Typing stubs for pyOpenSSL"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "types-pyOpenSSL-23.3.0.0.tar.gz", hash = "sha256:5ffb077fe70b699c88d5caab999ae80e192fe28bf6cda7989b7e79b1e4e2dcd3"},
+ {file = "types_pyOpenSSL-23.3.0.0-py3-none-any.whl", hash = "sha256:00171433653265843b7469ddb9f3c86d698668064cc33ef10537822156130ebf"},
+]
+
+[package.dependencies]
+cryptography = ">=35.0.0"
+
[[package]]
name = "types-python-dateutil"
version = "2.8.19.14"
@@ -2974,6 +3083,21 @@ files = [
{file = "types_python_dateutil-2.8.19.14-py3-none-any.whl", hash = "sha256:f977b8de27787639986b4e28963263fd0e5158942b3ecef91b9335c130cb1ce9"},
]
+[[package]]
+name = "types-redis"
+version = "4.6.0.11"
+description = "Typing stubs for redis"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "types-redis-4.6.0.11.tar.gz", hash = "sha256:c8cfc84635183deca2db4a528966c5566445fd3713983f0034fb0f5a09e0890d"},
+ {file = "types_redis-4.6.0.11-py3-none-any.whl", hash = "sha256:94fc61118601fb4f79206b33b9f4344acff7ca1d7bba67834987fb0efcf6a770"},
+]
+
+[package.dependencies]
+cryptography = ">=35.0.0"
+types-pyOpenSSL = "*"
+
[[package]]
name = "types-requests"
version = "2.31.0.6"
@@ -2988,6 +3112,20 @@ files = [
[package.dependencies]
types-urllib3 = "*"
+[[package]]
+name = "types-requests"
+version = "2.31.0.20231231"
+description = "Typing stubs for requests"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "types-requests-2.31.0.20231231.tar.gz", hash = "sha256:0f8c0c9764773384122813548d9eea92a5c4e1f33ed54556b508968ec5065cee"},
+ {file = "types_requests-2.31.0.20231231-py3-none-any.whl", hash = "sha256:2e2230c7bc8dd63fa3153c1c0ae335f8a368447f0582fc332f17d54f88e69027"},
+]
+
+[package.dependencies]
+urllib3 = ">=2"
+
[[package]]
name = "types-urllib3"
version = "1.26.25.14"
@@ -3096,6 +3234,22 @@ files = [
[package.extras]
watchmedo = ["PyYAML (>=3.10)"]
+[[package]]
+name = "websocket-client"
+version = "1.6.1"
+description = "WebSocket client for Python with low level API options"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "websocket-client-1.6.1.tar.gz", hash = "sha256:c951af98631d24f8df89ab1019fc365f2227c0892f12fd150e935607c79dd0dd"},
+ {file = "websocket_client-1.6.1-py3-none-any.whl", hash = "sha256:f1f9f2ad5291f0225a49efad77abf9e700b6fef553900623060dad6e26503b9d"},
+]
+
+[package.extras]
+docs = ["Sphinx (>=3.4)", "sphinx-rtd-theme (>=0.5)"]
+optional = ["python-socks", "wsaccel"]
+test = ["websockets"]
+
[[package]]
name = "wrapt"
version = "1.16.0"
@@ -3218,15 +3372,16 @@ docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker
testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
[extras]
-all = ["aws-xray-sdk", "fastjsonschema", "pydantic"]
+all = ["aws-xray-sdk", "fastjsonschema", "pydantic", "redis"]
aws-sdk = ["boto3"]
datadog = ["datadog-lambda"]
datamasking-aws-sdk = ["aws-encryption-sdk"]
parser = ["pydantic"]
+redis = ["redis"]
tracer = ["aws-xray-sdk"]
validation = ["fastjsonschema"]
[metadata]
lock-version = "2.0"
python-versions = "^3.7.4"
-content-hash = "49c3f986d5d933bfd740bb6c3dc9860df5cdc65f87fc46aa618dccbe1395452a"
+content-hash = "9b97beba800d18f25b62720d5ae5f0e323171fe0b90cb40f51ac7c6d8b5991dd"
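Taken together, the lockfile and extras changes make `redis` an optional dependency group. A minimal consumer-side sketch (an illustration, not part of this change set) is to guard the optional import so a missing extra fails loudly:

    # Hedged sketch: redis is an optional extra (e.g. pip install "aws-lambda-powertools[redis]"),
    # so this import may legitimately fail when the extra was not installed.
    try:
        import redis  # noqa: F401
    except ImportError as exc:
        raise RuntimeError("redis is an optional dependency; install the 'redis' extra") from exc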
diff --git a/pyproject.toml b/pyproject.toml
index af01707e09..cdb7ab83ea 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -44,6 +44,7 @@ aws-xray-sdk = { version = "^2.8.0", optional = true }
fastjsonschema = { version = "^2.14.5", optional = true }
pydantic = { version = "^1.8.2", optional = true }
boto3 = { version = "^1.20.32", optional = true }
+redis = {version = "^4.4.0", optional = true}
typing-extensions = "^4.6.2"
datadog-lambda = { version = ">=4.77,<6.0", optional = true }
aws-encryption-sdk = { version = "^3.1.1", optional = true }
@@ -95,6 +96,7 @@ datadog-lambda = "^5.85.0"
parser = ["pydantic"]
validation = ["fastjsonschema"]
tracer = ["aws-xray-sdk"]
+redis = ["redis"]
all = ["pydantic", "aws-xray-sdk", "fastjsonschema"]
# allow customers to run code locally without emulators (SAM CLI, etc.)
aws-sdk = ["boto3"]
@@ -110,6 +112,8 @@ sentry-sdk = "^1.22.2"
ruff = ">=0.0.272,<0.1.12"
retry2 = "^0.9.5"
pytest-socket = "^0.6.0"
+types-redis = "^4.6.0.7"
+testcontainers = {extras = ["redis"], version = "^3.7.1"}
[tool.coverage.run]
source = ["aws_lambda_powertools"]
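The `testcontainers` dev dependency added above (with its `redis` extra) can provide a disposable Redis instance for functional tests. A minimal sketch, assuming Docker is running locally:

    from testcontainers.redis import RedisContainer

    # Start a throwaway Redis container; it is torn down when the context exits.
    with RedisContainer() as redis_container:
        client = redis_container.get_client()  # a redis.Redis bound to the mapped port
        client.set("powertools", "redis")
        assert client.get("powertools") == b"redis"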
diff --git a/tests/e2e/idempotency_redis/__init__.py b/tests/e2e/idempotency_redis/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/e2e/idempotency_redis/conftest.py b/tests/e2e/idempotency_redis/conftest.py
new file mode 100644
index 0000000000..65cffcd194
--- /dev/null
+++ b/tests/e2e/idempotency_redis/conftest.py
@@ -0,0 +1,19 @@
+import pytest
+
+from tests.e2e.idempotency_redis.infrastructure import IdempotencyRedisServerlessStack
+
+
+@pytest.fixture(autouse=True, scope="package")
+def infrastructure():
+ """Setup and teardown logic for E2E test infrastructure
+
+ Yields
+ ------
+ Dict[str, str]
+ CloudFormation Outputs from deployed infrastructure
+ """
+ stack = IdempotencyRedisServerlessStack()
+ try:
+ yield stack.deploy()
+ finally:
+ stack.delete()
diff --git a/tests/e2e/idempotency_redis/handlers/function_thread_safety_handler.py b/tests/e2e/idempotency_redis/handlers/function_thread_safety_handler.py
new file mode 100644
index 0000000000..6d326c17e4
--- /dev/null
+++ b/tests/e2e/idempotency_redis/handlers/function_thread_safety_handler.py
@@ -0,0 +1,29 @@
+import os
+import time
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from threading import current_thread
+
+from aws_lambda_powertools.utilities.idempotency import (
+ idempotent_function,
+)
+from aws_lambda_powertools.utilities.idempotency.persistence.redis import RedisCachePersistenceLayer
+
+REDIS_HOST = os.getenv("RedisEndpoint", "")
+persistence_layer = RedisCachePersistenceLayer(host=REDIS_HOST, port=6379)
+threads_count = 2
+
+
+@idempotent_function(persistence_store=persistence_layer, data_keyword_argument="record")
+def record_handler(record):
+ time_now = time.time()
+ return {"thread_name": current_thread().name, "time": str(time_now)}
+
+
+def lambda_handler(event, context):
+ with ThreadPoolExecutor(max_workers=threads_count) as executor:
+ futures = [executor.submit(record_handler, **{"record": i}) for i in range(threads_count)]
+
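+    # NOTE: Future._state is a private attribute; it is read here only so the test can
+    # assert that each thread reached the FINISHED state.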
+ return [
+ {"state": future._state, "exception": future.exception(), "output": future.result()}
+ for future in as_completed(futures)
+ ]
diff --git a/tests/e2e/idempotency_redis/handlers/optional_idempotency_key_handler.py b/tests/e2e/idempotency_redis/handlers/optional_idempotency_key_handler.py
new file mode 100644
index 0000000000..75be8b3129
--- /dev/null
+++ b/tests/e2e/idempotency_redis/handlers/optional_idempotency_key_handler.py
@@ -0,0 +1,17 @@
+import os
+import uuid
+
+from aws_lambda_powertools.utilities.idempotency import (
+ IdempotencyConfig,
+ idempotent,
+)
+from aws_lambda_powertools.utilities.idempotency.persistence.redis import RedisCachePersistenceLayer
+
+REDIS_HOST = os.getenv("RedisEndpoint", "")
+persistence_layer = RedisCachePersistenceLayer(host=REDIS_HOST, port=6379)
+config = IdempotencyConfig(event_key_jmespath='headers."X-Idempotency-Key"', use_local_cache=True)
+
+
+@idempotent(config=config, persistence_store=persistence_layer)
+def lambda_handler(event, context):
+ return {"request": str(uuid.uuid4())}
diff --git a/tests/e2e/idempotency_redis/handlers/parallel_execution_handler.py b/tests/e2e/idempotency_redis/handlers/parallel_execution_handler.py
new file mode 100644
index 0000000000..c28f84f746
--- /dev/null
+++ b/tests/e2e/idempotency_redis/handlers/parallel_execution_handler.py
@@ -0,0 +1,17 @@
+import os
+import time
+
+from aws_lambda_powertools.utilities.idempotency import (
+ idempotent,
+)
+from aws_lambda_powertools.utilities.idempotency.persistence.redis import RedisCachePersistenceLayer
+
+REDIS_HOST = os.getenv("RedisEndpoint", "")
+persistence_layer = RedisCachePersistenceLayer(host=REDIS_HOST, port=6379)
+
+
+@idempotent(persistence_store=persistence_layer)
+def lambda_handler(event, context):
+ time.sleep(5)
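+    # Sleeping past the function timeout keeps the record INPROGRESS, so a parallel
+    # invocation with the same payload is rejected while this one is still running.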
+
+ return event
diff --git a/tests/e2e/idempotency_redis/handlers/ttl_cache_expiration_handler.py b/tests/e2e/idempotency_redis/handlers/ttl_cache_expiration_handler.py
new file mode 100644
index 0000000000..a93413e157
--- /dev/null
+++ b/tests/e2e/idempotency_redis/handlers/ttl_cache_expiration_handler.py
@@ -0,0 +1,19 @@
+import os
+import time
+
+from aws_lambda_powertools.utilities.idempotency import (
+ IdempotencyConfig,
+ idempotent,
+)
+from aws_lambda_powertools.utilities.idempotency.persistence.redis import RedisCachePersistenceLayer
+
+REDIS_HOST = os.getenv("RedisEndpoint", "")
+persistence_layer = RedisCachePersistenceLayer(host=REDIS_HOST, port=6379)
+config = IdempotencyConfig(expires_after_seconds=5)
+
+
+@idempotent(config=config, persistence_store=persistence_layer)
+def lambda_handler(event, context):
+ time_now = time.time()
+
+ return {"time": str(time_now)}
diff --git a/tests/e2e/idempotency_redis/handlers/ttl_cache_timeout_handler.py b/tests/e2e/idempotency_redis/handlers/ttl_cache_timeout_handler.py
new file mode 100644
index 0000000000..56c2372b2c
--- /dev/null
+++ b/tests/e2e/idempotency_redis/handlers/ttl_cache_timeout_handler.py
@@ -0,0 +1,20 @@
+import os
+import time
+
+from aws_lambda_powertools.utilities.idempotency import (
+ IdempotencyConfig,
+ idempotent,
+)
+from aws_lambda_powertools.utilities.idempotency.persistence.redis import RedisCachePersistenceLayer
+
+REDIS_HOST = os.getenv("RedisEndpoint", "")
+persistence_layer = RedisCachePersistenceLayer(host=REDIS_HOST, port=6379)
+config = IdempotencyConfig(expires_after_seconds=1)
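+# Records expire after 1s so repeated test runs do not collide with stale entries.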
+
+
+@idempotent(config=config, persistence_store=persistence_layer)
+def lambda_handler(event, context):
+ sleep_time: int = event.get("sleep") or 0
+ time.sleep(sleep_time)
+
+ return event
diff --git a/tests/e2e/idempotency_redis/infrastructure.py b/tests/e2e/idempotency_redis/infrastructure.py
new file mode 100644
index 0000000000..8034731a35
--- /dev/null
+++ b/tests/e2e/idempotency_redis/infrastructure.py
@@ -0,0 +1,91 @@
+from typing import Tuple
+
+from aws_cdk import aws_ec2 as ec2
+from aws_cdk.aws_ec2 import (
+ SecurityGroup,
+ SubnetType,
+ Vpc,
+)
+from aws_cdk.aws_elasticache import (
+ CfnServerlessCache,
+)
+
+from tests.e2e.utils.data_builder import build_random_value
+from tests.e2e.utils.infrastructure import BaseInfrastructure
+
+
+class IdempotencyRedisServerlessStack(BaseInfrastructure):
+ def create_resources(self) -> None:
+ service_name = build_random_value(10)
+
+ vpc_stack: Vpc = self._create_vpc(service_name, "172.150.0.0/16")
+ security_groups: Tuple = self._create_security_groups(vpc_stack)
+ redis_cluster: CfnServerlessCache = self._create_redis_cache(service_name, vpc_stack, security_groups[0])
+
+ env_vars = {"RedisEndpoint": f"{str(redis_cluster.attr_endpoint_address)}"}
+
+ self.create_lambda_functions(
+ function_props={
+ "environment": env_vars,
+ "vpc": vpc_stack,
+ "security_groups": [security_groups[1]],
+ },
+ )
+
+ def _create_vpc(self, service_name: str, cidr: str) -> Vpc:
+ vpc_stack: Vpc = Vpc(
+ self.stack,
+ "VPC-ServerlessCache",
+ nat_gateways=1,
+ vpc_name=f"VPC-ServerlessCache-{service_name}",
+ ip_addresses=ec2.IpAddresses.cidr(cidr),
+ subnet_configuration=[
+ ec2.SubnetConfiguration(name="public", subnet_type=SubnetType.PUBLIC, cidr_mask=24),
+ ec2.SubnetConfiguration(name="private", subnet_type=SubnetType.PRIVATE_WITH_EGRESS, cidr_mask=24),
+ ],
+ max_azs=2,
+ )
+
+ return vpc_stack
+
+ def _create_security_groups(self, vpc_stack: Vpc) -> Tuple[SecurityGroup, SecurityGroup]:
+ # Create a security group for the ElastiCache cluster
+ cache_security_group: SecurityGroup = SecurityGroup(self.stack, "ElastiCacheSecurityGroup", vpc=vpc_stack)
+ cache_security_group.add_ingress_rule(
+ peer=ec2.Peer.ipv4(vpc_stack.vpc_cidr_block),
+ connection=ec2.Port.tcp(6379),
+ description="Allow inbound traffic from VPC",
+ )
+
+ lambda_security_group = SecurityGroup(
+ self.stack,
+ "LambdaSecurityGroup",
+ vpc=vpc_stack,
+ allow_all_ipv6_outbound=True,
+ allow_all_outbound=True,
+ )
+
+ return cache_security_group, lambda_security_group
+
+ def _create_redis_cache(
+ self,
+ service_name: str,
+ vpc_stack: Vpc,
+ cache_security_group: SecurityGroup,
+ ) -> CfnServerlessCache:
+ cache_cluster = CfnServerlessCache(
+ self.stack,
+ "ElastiCacheCluster",
+ engine="redis",
+ security_group_ids=[cache_security_group.security_group_id],
+ subnet_ids=[subnet.subnet_id for subnet in vpc_stack.private_subnets],
+ serverless_cache_name=f"Cache-{service_name}",
+ )
+
+        # No readiness polling is done here: attr_status is an unresolved CloudFormation
+        # token at synth time, and CloudFormation itself waits for the cache to become
+        # available before marking the stack complete.
+
+ return cache_cluster
diff --git a/tests/e2e/idempotency_redis/test_idempotency_redis.py b/tests/e2e/idempotency_redis/test_idempotency_redis.py
new file mode 100644
index 0000000000..4b5840ac47
--- /dev/null
+++ b/tests/e2e/idempotency_redis/test_idempotency_redis.py
@@ -0,0 +1,183 @@
+import json
+from time import sleep
+
+import pytest
+
+from tests.e2e.utils import data_fetcher
+from tests.e2e.utils.data_fetcher.common import GetLambdaResponseOptions, get_lambda_response_in_parallel
+
+
+@pytest.fixture
+def ttl_cache_expiration_handler_fn_arn(infrastructure: dict) -> str:
+ return infrastructure.get("TtlCacheExpirationHandlerArn", "")
+
+
+@pytest.fixture
+def ttl_cache_timeout_handler_fn_arn(infrastructure: dict) -> str:
+ return infrastructure.get("TtlCacheTimeoutHandlerArn", "")
+
+
+@pytest.fixture
+def parallel_execution_handler_fn_arn(infrastructure: dict) -> str:
+ return infrastructure.get("ParallelExecutionHandlerArn", "")
+
+
+@pytest.fixture
+def function_thread_safety_handler_fn_arn(infrastructure: dict) -> str:
+ return infrastructure.get("FunctionThreadSafetyHandlerArn", "")
+
+
+@pytest.fixture
+def optional_idempotency_key_fn_arn(infrastructure: dict) -> str:
+ return infrastructure.get("OptionalIdempotencyKeyHandlerArn", "")
+
+
+@pytest.mark.xdist_group(name="idempotency-redis")
+def test_ttl_caching_expiration_idempotency(ttl_cache_expiration_handler_fn_arn: str):
+ # GIVEN
+ payload = json.dumps({"message": "Powertools for AWS Lambda (Python) - TTL 5s"})
+
+ # WHEN
+ # first execution
+ first_execution, _ = data_fetcher.get_lambda_response(
+ lambda_arn=ttl_cache_expiration_handler_fn_arn,
+ payload=payload,
+ )
+ first_execution_response = first_execution["Payload"].read().decode("utf-8")
+
+ # the second execution should return the same response as the first execution
+ second_execution, _ = data_fetcher.get_lambda_response(
+ lambda_arn=ttl_cache_expiration_handler_fn_arn,
+ payload=payload,
+ )
+ second_execution_response = second_execution["Payload"].read().decode("utf-8")
+
+ # wait 8s to expire ttl and execute again, this should return a new response value
+ sleep(8)
+ third_execution, _ = data_fetcher.get_lambda_response(
+ lambda_arn=ttl_cache_expiration_handler_fn_arn,
+ payload=payload,
+ )
+ third_execution_response = third_execution["Payload"].read().decode("utf-8")
+
+ # THEN
+ assert first_execution_response == second_execution_response
+ assert third_execution_response != second_execution_response
+
+
+@pytest.mark.xdist_group(name="idempotency-redis")
+def test_ttl_caching_timeout_idempotency(ttl_cache_timeout_handler_fn_arn: str):
+ # GIVEN
+ payload_timeout_execution = json.dumps(
+ {"sleep": 5, "message": "Powertools for AWS Lambda (Python) - TTL 1s"},
+ sort_keys=True,
+ )
+ payload_working_execution = json.dumps(
+ {"sleep": 0, "message": "Powertools for AWS Lambda (Python) - TTL 1s"},
+ sort_keys=True,
+ )
+
+ # WHEN
+ # first call should fail due to timeout
+ execution_with_timeout, _ = data_fetcher.get_lambda_response(
+ lambda_arn=ttl_cache_timeout_handler_fn_arn,
+ payload=payload_timeout_execution,
+ raise_on_error=False,
+ )
+ execution_with_timeout_response = execution_with_timeout["Payload"].read().decode("utf-8")
+
+ # the second call should work and return the payload
+ execution_working, _ = data_fetcher.get_lambda_response(
+ lambda_arn=ttl_cache_timeout_handler_fn_arn,
+ payload=payload_working_execution,
+ )
+ execution_working_response = execution_working["Payload"].read().decode("utf-8")
+
+ # THEN
+ assert "Task timed out after" in execution_with_timeout_response
+ assert payload_working_execution == execution_working_response
+
+
+@pytest.mark.xdist_group(name="idempotency-redis")
+def test_parallel_execution_idempotency(parallel_execution_handler_fn_arn: str):
+ # GIVEN
+ payload = json.dumps({"message": "Powertools for AWS Lambda (Python) - Parallel execution"})
+
+ invocation_options = [
+ GetLambdaResponseOptions(lambda_arn=parallel_execution_handler_fn_arn, payload=payload, raise_on_error=False),
+ GetLambdaResponseOptions(lambda_arn=parallel_execution_handler_fn_arn, payload=payload, raise_on_error=False),
+ ]
+
+ # WHEN executing Lambdas in parallel
+ execution_result_list = get_lambda_response_in_parallel(invocation_options)
+
+ timeout_execution_response = execution_result_list[0][0]["Payload"].read().decode("utf-8")
+ error_idempotency_execution_response = execution_result_list[1][0]["Payload"].read().decode("utf-8")
+
+ # THEN
+ assert "Execution already in progress with idempotency key" in error_idempotency_execution_response
+ assert "Task timed out after" in timeout_execution_response
+
+
+@pytest.mark.xdist_group(name="idempotency-redis")
+def test_idempotent_function_thread_safety(function_thread_safety_handler_fn_arn: str):
+ # GIVEN
+ payload = json.dumps({"message": "Powertools for AWS Lambda (Python) - Idempotent function thread safety check"})
+
+ # WHEN
+ # first execution
+ first_execution, _ = data_fetcher.get_lambda_response(
+ lambda_arn=function_thread_safety_handler_fn_arn,
+ payload=payload,
+ )
+ first_execution_response = first_execution["Payload"].read().decode("utf-8")
+
+ # the second execution should return the same response as the first execution
+ second_execution, _ = data_fetcher.get_lambda_response(
+ lambda_arn=function_thread_safety_handler_fn_arn,
+ payload=payload,
+ )
+ second_execution_response = second_execution["Payload"].read().decode("utf-8")
+
+ # THEN
+ # Function threads finished without exception AND
+ # first and second execution is the same
+ for function_thread in json.loads(first_execution_response):
+ assert function_thread["state"] == "FINISHED"
+ assert function_thread["exception"] is None
+ assert function_thread["output"] is not None
+
+    # compare the parsed results regardless of their order in the array,
+    # since thread completion order is non-deterministic
+    first_results = sorted(json.loads(first_execution_response), key=json.dumps)
+    second_results = sorted(json.loads(second_execution_response), key=json.dumps)
+    assert first_results == second_results
+
+
+@pytest.mark.xdist_group(name="idempotency-redis")
+def test_optional_idempotency_key(optional_idempotency_key_fn_arn: str):
+ # GIVEN two payloads where only one has the expected idempotency key
+ payload = json.dumps({"headers": {"X-Idempotency-Key": "here"}})
+ payload_without = json.dumps({"headers": {}})
+
+ # WHEN
+ # we make one request with an idempotency key
+ first_execution, _ = data_fetcher.get_lambda_response(lambda_arn=optional_idempotency_key_fn_arn, payload=payload)
+ first_execution_response = first_execution["Payload"].read().decode("utf-8")
+
+ # and two others without the idempotency key
+ second_execution, _ = data_fetcher.get_lambda_response(
+ lambda_arn=optional_idempotency_key_fn_arn,
+ payload=payload_without,
+ )
+ second_execution_response = second_execution["Payload"].read().decode("utf-8")
+
+ third_execution, _ = data_fetcher.get_lambda_response(
+ lambda_arn=optional_idempotency_key_fn_arn,
+ payload=payload_without,
+ )
+ third_execution_response = third_execution["Payload"].read().decode("utf-8")
+
+ # THEN
+ # we should treat 2nd and 3rd requests with NULL idempotency key as non-idempotent transactions
+ # that is, no cache, no calls to persistent store, etc.
+ assert first_execution_response != second_execution_response
+ assert first_execution_response != third_execution_response
+ assert second_execution_response != third_execution_response
diff --git a/tests/e2e/utils/lambda_layer/powertools_layer.py b/tests/e2e/utils/lambda_layer/powertools_layer.py
index 0514704867..0bc1dbe97c 100644
--- a/tests/e2e/utils/lambda_layer/powertools_layer.py
+++ b/tests/e2e/utils/lambda_layer/powertools_layer.py
@@ -19,7 +19,7 @@ class LocalLambdaPowertoolsLayer(BaseLocalLambdaLayer):
def __init__(self, output_dir: Path = CDK_OUT_PATH, architecture: Architecture = Architecture.X86_64):
super().__init__(output_dir)
- self.package = f"{SOURCE_CODE_ROOT_PATH}[all]"
+ self.package = f"{SOURCE_CODE_ROOT_PATH}[all,redis]"
self.platform_args = self._resolve_platform(architecture)
self.build_args = f"{self.platform_args} --only-binary=:all: --upgrade"
diff --git a/tests/functional/idempotency/persistence/test_redis_layer.py b/tests/functional/idempotency/persistence/test_redis_layer.py
new file mode 100644
index 0000000000..75db55dba5
--- /dev/null
+++ b/tests/functional/idempotency/persistence/test_redis_layer.py
@@ -0,0 +1,630 @@
+# ruff: noqa
+import copy
+import datetime
+import json
+import time as t
+from multiprocessing import Lock, Manager, Process
+from typing import Optional
+from unittest import mock
+
+import pytest
+
+from aws_lambda_powertools.utilities.idempotency.exceptions import (
+ IdempotencyAlreadyInProgressError,
+ IdempotencyItemAlreadyExistsError,
+ IdempotencyItemNotFoundError,
+ IdempotencyPersistenceConfigError,
+ IdempotencyPersistenceConnectionError,
+ IdempotencyPersistenceConsistencyError,
+ IdempotencyValidationError,
+)
+from aws_lambda_powertools.utilities.idempotency.idempotency import (
+ IdempotencyConfig,
+ idempotent,
+ idempotent_function,
+)
+from aws_lambda_powertools.utilities.idempotency.persistence.base import (
+ DataRecord,
+ STATUS_CONSTANTS,
+)
+from aws_lambda_powertools.utilities.idempotency.persistence.redis import (
+ RedisCachePersistenceLayer,
+)
+
+redis_badhost = "badhost"
+
+
+@pytest.fixture
+def lambda_context():
+ class LambdaContext:
+ def __init__(self):
+ self.function_name = "test-func"
+ self.memory_limit_in_mb = 128
+ self.invoked_function_arn = "arn:aws:lambda:eu-west-1:809313241234:function:test-func"
+ self.aws_request_id = "52fdfc07-2182-154f-163f-5f0f9a621d72"
+
+ def get_remaining_time_in_millis(self) -> int:
+ return 1000
+
+ return LambdaContext()
+
+
+class RedisExceptions:
+ class RedisClusterException(Exception):
+ "mock cluster exception"
+
+ class RedisError(Exception):
+ "mock redis exception"
+
+ class ConnectionError(Exception):
+ "mock connection exception"
+
+
+class MockRedisBase:
+ # base mock client; also verifies that clients without get_connection_kwargs are handled gracefully
+ exceptions = RedisExceptions
+
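+ # calling the mock like redis.Redis(**kwargs) stores the kwargs on the instance
+ # and returns itself; the well-known bad host raises a ConnectionError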
+ def __call__(self, *args, **kwargs):
+ if kwargs.get("host") == redis_badhost:
+ raise self.exceptions.ConnectionError
+ self.__dict__.update(kwargs)
+ return self
+
+ @property
+ def Redis(self):
+ self.mode = "standalone"
+ return self
+
+ @property
+ def cluster(self):
+ return self
+
+ @property
+ def RedisCluster(self):
+ self.mode = "cluster"
+ return self
+
+ # closing the mock makes subsequent operations raise, mimicking a Redis error
+ def close(self):
+ self.closed = True
+
+
+class MockRedis(MockRedisBase):
+ def __init__(self, cache: Optional[dict] = None, mock_latency_ms: int = 0, **kwargs):
+ self.cache = cache or {}
+ self.expire_dict = {}
+ self.acl = {}
+ self.username = ""
+ self.mode = ""
+ self.url = ""
+ self.__dict__.update(kwargs)
+ self.closed = False
+ self.mock_latency_ms = mock_latency_ms
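+ # lock making SET NX an atomic check-and-set across concurrent processes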
+ self.nx_lock = Lock()
+ super().__init__()
+
+ # check_closed is called before every mock redis operation
+ def check_closed(self):
+ if self.mock_latency_ms != 0:
+ t.sleep(self.mock_latency_ms / 1000)
+ if not self.closed:
+ return
+ if self.mode == "cluster":
+ raise self.exceptions.RedisClusterException
+ raise self.exceptions.RedisError
+
+ def from_url(self, url: str):
+ self.url = url
+ return self
+
+ # expiry behavior is not yet directly covered by tests
+ def expire(self, name, time):
+ self.check_closed()
+ if time != 0:
+ self.expire_dict[name] = t.time() + time
+
+ def auth(self, username, **kwargs):
+ self.username = username
+
+ def delete(self, name):
+ self.check_closed()
+ self.cache.pop(name, {})
+
+ # returns None if the NX condition fails, True on success (mirrors redis-py SET)
+ def set(self, name, value, ex: int = 0, nx: bool = False):
+ # evict the key first if its TTL has passed
+ self.check_closed()
+ if self.expire_dict.get(name, t.time() + 1) < t.time():
+ self.cache.pop(name, {})
+
+ if isinstance(value, str):
+ value = value.encode()
+
+ # nx logic, acquire a lock for multiprocessing safety
+ with self.nx_lock:
+ # key exists; NX mode returns None
+ if name in self.cache and nx:
+ return None
+
+ # key doesn't exist, set the key
+ self.cache[name] = value
+ self.expire(name, ex)
+ return True
+
+ # return None if not found
+ def get(self, name: str):
+ self.check_closed()
+ if self.expire_dict.get(name, t.time() + 1) < t.time():
+ self.cache.pop(name, {})
+
+ resp = self.cache.get(name, None)
+
+ return resp
+
+
+@pytest.fixture
+def persistence_store_standalone_redis_no_decode():
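+ # MockRedis always returns raw bytes, mimicking a client created with decode_responses=False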
+ redis_client = MockRedis(
+ host="localhost",
+ port="63005",
+ )
+ return RedisCachePersistenceLayer(client=redis_client)
+
+
+@pytest.fixture
+def persistence_store_standalone_redis():
+ redis_client = MockRedis(
+ host="localhost",
+ port="63005",
+ )
+ return RedisCachePersistenceLayer(client=redis_client)
+
+
+@pytest.fixture
+def orphan_record():
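+ # an in-progress record whose expiry timestamp is already in the past (orphaned)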
+ return DataRecord(
+ idempotency_key="test_orphan_key",
+ status=STATUS_CONSTANTS["INPROGRESS"],
+ in_progress_expiry_timestamp=int(datetime.datetime.now().timestamp() * 1000 - 1),
+ )
+
+
+@pytest.fixture
+def valid_record():
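+ # an in-progress record that is still valid (expires one second in the future)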
+ return DataRecord(
+ idempotency_key="test_orphan_key",
+ status=STATUS_CONSTANTS["INPROGRESS"],
+ in_progress_expiry_timestamp=int(datetime.datetime.now().timestamp() * 1000 + 1000),
+ )
+
+
+@mock.patch("aws_lambda_powertools.utilities.idempotency.persistence.redis.redis", MockRedis())
+def test_redis_connection_standalone():
+ # when RedisCachePersistenceLayer is init with the following params
+ redis_conf = {
+ "host": "host",
+ "port": "port",
+ "mode": "standalone",
+ "username": "redis_user",
+ "password": "redis_pass",
+ "db_index": "db_index",
+ }
+ layer = RedisCachePersistenceLayer(**redis_conf)
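+ # in standalone mode the layer maps db_index to the redis client's "db" kwarg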
+ redis_conf["db"] = redis_conf["db_index"]
+ redis_conf.pop("db_index")
+ # then these params should be passed down to mock Redis identically
+ for k, v in redis_conf.items():
+ assert layer.client.__dict__.get(k) == v
+
+
+@mock.patch("aws_lambda_powertools.utilities.idempotency.persistence.redis.redis", MockRedis())
+def test_redis_connection_cluster():
+ # when RedisCachePersistenceLayer is init with the following params
+ redis_conf = {
+ "host": "host",
+ "port": "port",
+ "mode": "cluster",
+ "username": "redis_user",
+ "password": "redis_pass",
+ "db_index": "db_index",
+ }
+ layer = RedisCachePersistenceLayer(**redis_conf)
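+ # in cluster mode db becomes None, since Redis Cluster only supports logical database 0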
+ redis_conf["db"] = None
+ redis_conf.pop("db_index")
+
+ # then these params should be passed down to mock Redis identically
+ for k, v in redis_conf.items():
+ assert layer.client.__dict__.get(k) == v
+
+
+@mock.patch("aws_lambda_powertools.utilities.idempotency.persistence.redis.redis", MockRedis())
+def test_redis_connection_conn_error():
+ # when RedisCachePersistenceLayer is init with a bad host
+ # then should raise IdempotencyPersistenceConnectionError
+ with pytest.raises(IdempotencyPersistenceConnectionError):
+ RedisCachePersistenceLayer(host=redis_badhost)
+
+
+@mock.patch("aws_lambda_powertools.utilities.idempotency.persistence.redis.redis", MockRedis())
+def test_redis_connection_conf_error():
+ # when RedisCachePersistenceLayer is init with an unsupported mode
+ # then should raise IdempotencyPersistenceConfigError
+ with pytest.raises(IdempotencyPersistenceConfigError):
+ RedisCachePersistenceLayer(mode="not_supported_mode")
+
+
+@mock.patch("aws_lambda_powertools.utilities.idempotency.persistence.redis.redis", MockRedis())
+def test_redis_key_error():
+ # when RedisCachePersistenceLayer is trying to get a non-existent key
+ # then should raise IdempotencyItemNotFoundError
+ with pytest.raises(IdempotencyItemNotFoundError):
+ layer = RedisCachePersistenceLayer(host="host")
+ layer._get_record(idempotency_key="not_exist")
+
+
+@mock.patch("aws_lambda_powertools.utilities.idempotency.persistence.redis.redis", MockRedis())
+def test_redis_key_corrupted():
+ # when RedisCachePersistenceLayer reads a record that is not valid JSON
+ # then should raise IdempotencyPersistenceConsistencyError
+ with pytest.raises(IdempotencyPersistenceConsistencyError):
+ layer = RedisCachePersistenceLayer(url="sample_url")
+ layer.client.set("corrupted_json", "not_json_string")
+ layer._get_record(idempotency_key="corrupted_json")
+
+
+@mock.patch("aws_lambda_powertools.utilities.idempotency.persistence.redis.redis", MockRedis())
+def test_redis_orphan_record(orphan_record, valid_record):
+ layer = RedisCachePersistenceLayer(host="host")
+ # Given an orphan record exists
+ layer._put_in_progress_record(orphan_record)
+ # When we try to update the record
+ layer._put_in_progress_record(valid_record)
+ # Then the orphan record is overwritten
+ assert (
+ layer._get_record(valid_record.idempotency_key).in_progress_expiry_timestamp
+ == valid_record.in_progress_expiry_timestamp
+ )
+
+
+@mock.patch("aws_lambda_powertools.utilities.idempotency.persistence.redis.redis", MockRedis())
+def test_redis_orphan_record_lock(orphan_record, valid_record):
+ layer = RedisCachePersistenceLayer(host="host")
+ # Given an orphan record exists and a lock also exists
+ layer._put_in_progress_record(orphan_record)
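+ # simulate another worker already holding the orphan-cleanup lock for this key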
+ layer.client.set("test_orphan_key:lock", "True")
+ # when trying to overwrite the record
+ # Then we should raise IdempotencyItemAlreadyExistsError
+ with pytest.raises(IdempotencyItemAlreadyExistsError):
+ layer._put_in_progress_record(valid_record)
+ # And the record should not be overwritten
+ assert (
+ layer._get_record(valid_record.idempotency_key).in_progress_expiry_timestamp
+ == orphan_record.in_progress_expiry_timestamp
+ )
+
+
+@mock.patch("aws_lambda_powertools.utilities.idempotency.persistence.redis.redis", MockRedis())
+def test_redis_error_in_progress(valid_record):
+ layer = RedisCachePersistenceLayer(host="host", mode="standalone")
+ layer.client.close()
+ # given the Redis client raises RedisError
+ # when trying to save an in-progress record
+ # then the layer should surface RedisExceptions.RedisError
+ with pytest.raises(RedisExceptions.RedisError):
+ layer._put_in_progress_record(valid_record)
+
+
+@mock.patch("aws_lambda_powertools.utilities.idempotency.persistence.redis.redis", MockRedis())
+def test_item_to_datarecord_conversion(valid_record):
+ layer = RedisCachePersistenceLayer(host="host", mode="standalone")
+ item = {
+ "status": STATUS_CONSTANTS["INPROGRESS"],
+ layer.in_progress_expiry_attr: int(datetime.datetime.now().timestamp() * 1000),
+ }
+ # given a dict representation of a data record
+ # when calling _item_to_data_record
+ record = layer._item_to_data_record(idempotency_key="abc", item=item)
+ # then all valid fields in the dict should be copied into the DataRecord
+ assert record.idempotency_key == "abc"
+ assert record.status == STATUS_CONSTANTS["INPROGRESS"]
+ assert record.in_progress_expiry_timestamp == item[layer.in_progress_expiry_attr]
+
+
+def test_idempotent_function_and_lambda_handler_redis_basic(
+ persistence_store_standalone_redis: RedisCachePersistenceLayer,
+ lambda_context,
+):
+ mock_event = {"data": "value"}
+ persistence_layer = persistence_store_standalone_redis
+ expected_result = {"message": "Foo"}
+
+ @idempotent_function(persistence_store=persistence_layer, data_keyword_argument="record")
+ def record_handler(record):
+ return expected_result
+
+ @idempotent(persistence_store=persistence_layer)
+ def lambda_handler(event, context):
+ return expected_result
+
+ # WHEN calling the function
+ fn_result = record_handler(record=mock_event)
+ # WHEN calling lambda handler
+ handler_result = lambda_handler(mock_event, lambda_context)
+ # THEN we expect the function and lambda handler to execute successfully
+ assert fn_result == expected_result
+ assert handler_result == expected_result
+
+
+def test_idempotent_function_and_lambda_handler_redis_cache(
+ persistence_store_standalone_redis: RedisCachePersistenceLayer,
+ lambda_context,
+):
+ mock_event = {"data": "value2"}
+ persistence_layer = persistence_store_standalone_redis
+ result = {"message": "Foo"}
+ expected_result = copy.deepcopy(result)
+
+ @idempotent_function(persistence_store=persistence_layer, data_keyword_argument="record")
+ def record_handler(record):
+ return result
+
+ @idempotent(persistence_store=persistence_layer)
+ def lambda_handler(event, context):
+ return result
+
+ # WHEN calling the function and handler with idempotency
+ fn_result = record_handler(record=mock_event)
+ handler_result = lambda_handler(mock_event, lambda_context)
+ # THEN we expect the function and lambda handler to execute successfully
+ assert fn_result == expected_result
+ assert handler_result == expected_result
+
+ result = {"message": "Bar"}
+ # Given the idempotency record is already in Redis
+ # When we modify the actual function output and run a second time
+ fn_result2 = record_handler(record=mock_event)
+ handler_result2 = lambda_handler(mock_event, lambda_context)
+ # Then the result should be the same as the first time
+ assert fn_result2 == expected_result
+ assert handler_result2 == expected_result
+
+ # Given the idempotency record is already in Redis
+ # When we modify the actual function output and use a different payload
+ mock_event = {"data": "value3"}
+ fn_result3 = record_handler(record=mock_event)
+ handler_result3 = lambda_handler(mock_event, lambda_context)
+ # Then the result should be the actual function output
+ assert fn_result3 == result
+ assert handler_result3 == result
+
+
+def test_idempotent_function_and_lambda_handler_redis_event_key(
+ persistence_store_standalone_redis: RedisCachePersistenceLayer,
+ lambda_context,
+):
+ mock_event = {"body": '{"user_id":"xyz","time":"1234"}'}
+ persistence_layer = persistence_store_standalone_redis
+ result = {"message": "Foo"}
+ expected_result = copy.deepcopy(result)
+ config = IdempotencyConfig(event_key_jmespath='powertools_json(body).["user_id"]')
+
+ @idempotent(persistence_store=persistence_layer, config=config)
+ def lambda_handler(event, context):
+ return result
+
+ # WHEN calling the function and handler with idempotency and event_key_jmespath config to only verify user_id
+ handler_result = lambda_handler(mock_event, lambda_context)
+ # THEN we expect the function and lambda handler to execute successfully
+ assert handler_result == expected_result
+
+ result = {"message": "Bar"}
+ mock_event = {"body": '{"user_id":"xyz","time":"2345"}'}
+ # Given the idempotency record is already in Redis
+ # When we modify the function output and the "time" field (not part of the idempotency key) and run again
+ handler_result2 = lambda_handler(mock_event, lambda_context)
+ # Then the result should be the same as the first time
+ assert handler_result2 == expected_result
+
+
+def test_idempotent_function_and_lambda_handler_redis_validation(
+ persistence_store_standalone_redis: RedisCachePersistenceLayer,
+ lambda_context,
+):
+ mock_event = {"user_id": "xyz", "time": "1234"}
+ persistence_layer = persistence_store_standalone_redis
+ result = {"message": "Foo"}
+ config = IdempotencyConfig(event_key_jmespath="user_id", payload_validation_jmespath="time")
+
+ @idempotent(persistence_store=persistence_layer, config=config)
+ def lambda_handler(event, context):
+ return result
+
+ # WHEN calling the handler with idempotency, event_key_jmespath and payload_validation_jmespath configured
+ lambda_handler(mock_event, lambda_context)
+ # THEN we expect the function and lambda handler to execute successfully
+
+ result = {"message": "Bar"}
+ mock_event = {"user_id": "xyz", "time": "2345"}
+ # Given the idempotency record is already in Redis
+ # When we modify the validated part of the payload and invoke again
+ # Then it should raise IdempotencyValidationError
+ with pytest.raises(IdempotencyValidationError):
+ lambda_handler(mock_event, lambda_context)
+
+
+def test_idempotent_function_and_lambda_handler_redis_basic_no_decode(
+ persistence_store_standalone_redis_no_decode: RedisCachePersistenceLayer,
+ lambda_context,
+):
+ # GIVEN redis client passed in has decode_responses=False
+ mock_event = {"data": "value-nodecode"}
+ persistence_layer = persistence_store_standalone_redis_no_decode
+ result = {"message": "Foo"}
+ expected_result = copy.deepcopy(result)
+
+ @idempotent_function(persistence_store=persistence_layer, data_keyword_argument="record")
+ def record_handler(record):
+ return result
+
+ @idempotent(persistence_store=persistence_layer)
+ def lambda_handler(event, context):
+ return result
+
+ # WHEN calling the function and handler with idempotency
+ fn_result = record_handler(record=mock_event)
+ handler_result = lambda_handler(mock_event, lambda_context)
+ # THEN we expect the function and lambda handler to execute successfully
+ assert fn_result == expected_result
+ assert handler_result == expected_result
+
+ result = {"message": "Bar"}
+ # Given the idempotency record is already in Redis
+ # When we modify the actual function output and run a second time
+ fn_result2 = record_handler(record=mock_event)
+ handler_result2 = lambda_handler(mock_event, lambda_context)
+ # Then the result should be the same as the first time
+ assert fn_result2 == expected_result
+ assert handler_result2 == expected_result
+
+ # Given the idempotency record is already in Redis
+ # When we modify the actual function output and use a different payload
+ mock_event = {"data": "value3"}
+ fn_result3 = record_handler(record=mock_event)
+ handler_result3 = lambda_handler(mock_event, lambda_context)
+ # Then the result should be the actual function output
+ assert fn_result3 == result
+ assert handler_result3 == result
+
+
+def test_idempotent_lambda_redis_in_progress(
+ persistence_store_standalone_redis: RedisCachePersistenceLayer,
+ lambda_context,
+):
+ """
+ Test idempotent decorator where lambda_handler is already processing an event with matching event key
+ """
+
+ mock_event = {"data": "value4"}
+ persistence_store = persistence_store_standalone_redis
+ lambda_response = {"foo": "bar"}
+
+ @idempotent(persistence_store=persistence_store)
+ def lambda_handler(event, context):
+ return lambda_response
+
+ # Given an in-progress idempotency record already in Redis
+ lambda_handler(mock_event, lambda_context)
+ mock_event = {"data": "value7"}
+ try:
+ persistence_store.save_inprogress(mock_event, 1000)
+ except IdempotencyItemAlreadyExistsError:
+ pass
+ # when invoking with the same payload
+ # then should raise IdempotencyAlreadyInProgressError
+ with pytest.raises(IdempotencyAlreadyInProgressError):
+ lambda_handler(mock_event, lambda_context)
+
+
+def test_idempotent_lambda_redis_delete(
+ persistence_store_standalone_redis: RedisCachePersistenceLayer,
+ lambda_context,
+):
+ mock_event = {"data": "test_delete"}
+ persistence_layer = persistence_store_standalone_redis
+ result = {"message": "Foo"}
+
+ @idempotent(persistence_store=persistence_layer)
+ def lambda_handler(event, _):
+ return result
+
+ handler_result = lambda_handler(mock_event, lambda_context)
+ assert handler_result == result
+ # Given the idempotency record from the first run is deleted
+ persistence_layer.delete_record(mock_event, IdempotencyItemNotFoundError)
+ result = {"message": "Foo2"}
+ # When the lambda handler runs a second time
+ handler_result2 = lambda_handler(mock_event, lambda_context)
+
+ # Then the lambda handler should return the actual function output
+ assert handler_result2 == result
+
+
+def test_redis_orphan_record_race_condition(lambda_context):
+ redis_client = MockRedis(
+ host="localhost",
+ port="63005",
+ mock_latency_ms=50,
+ )
+ manager = Manager()
+ # use process-safe dicts shared via multiprocessing.Manager
+ redis_client.expire_dict = manager.dict()
+ redis_client.cache = manager.dict()
+ # given a mock redis client with latency (an orphan record is created below)
+ layer = RedisCachePersistenceLayer(client=redis_client)
+
+ mock_event = {"data": "value4"}
+ lambda_response = {"foo": "bar"}
+
+ @idempotent(persistence_store=layer)
+ def lambda_handler(event, context):
+ print("lambda executed")
+ if redis_client.cache.get("exec_count", None) != None:
+ redis_client.cache["exec_count"] += 1
+ return lambda_response
+
+ # run handler for the first time to create a valid record in cache
+ lambda_handler(mock_event, lambda_context)
+ # modify the cache expiration to create the orphan record
+ for key, item in redis_client.cache.items():
+ json_dict = json.loads(item)
+ json_dict["expiration"] = int(t.time()) - 4000
+ redis_client.cache[key] = json.dumps(json_dict).encode()
+ # Given an orphan idempotency record with the same payload already in Redis
+ # When running two lambda handlers at the same time
+ redis_client.cache["exec_count"] = 0
+ p1 = Process(target=lambda_handler, args=(mock_event, lambda_context))
+ p2 = Process(target=lambda_handler, args=(mock_event, lambda_context))
+ p1.start()
+ p2.start()
+ p1.join()
+ p2.join()
+ # Then only one handler will actually run
+ assert redis_client.cache["exec_count"] == 1
+
+
+# race condition when no record exists yet
+def test_redis_race_condition(lambda_context):
+ redis_client = MockRedis(
+ host="localhost",
+ port="63005",
+ mock_latency_ms=50,
+ )
+ manager = Manager()
+ # use process-safe dicts shared via multiprocessing.Manager
+ redis_client.expire_dict = manager.dict()
+ redis_client.cache = manager.dict()
+ # given a mock redis client with latency and no pre-existing record
+ layer = RedisCachePersistenceLayer(client=redis_client)
+
+ mock_event = {"data": "value4"}
+ lambda_response = {"foo": "bar"}
+
+ @idempotent(persistence_store=layer)
+ def lambda_handler(event, context):
+ print("lambda executed")
+ if redis_client.cache.get("exec_count") is not None:
+ redis_client.cache["exec_count"] += 1
+ return lambda_response
+
+ # When running two lambda handlers at the same time
+ redis_client.cache["exec_count"] = 0
+ p1 = Process(target=lambda_handler, args=(mock_event, lambda_context))
+ p2 = Process(target=lambda_handler, args=(mock_event, lambda_context))
+ p1.start()
+ p2.start()
+ p1.join()
+ p2.join()
+ # Then only one handler will actually run
+ assert redis_client.cache["exec_count"] == 1
diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/integration/idempotency/__init__.py b/tests/integration/idempotency/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/integration/idempotency/test_idempotency_redis.py b/tests/integration/idempotency/test_idempotency_redis.py
new file mode 100644
index 0000000000..bfced379db
--- /dev/null
+++ b/tests/integration/idempotency/test_idempotency_redis.py
@@ -0,0 +1,198 @@
+import copy
+
+import pytest
+from testcontainers.redis import RedisContainer
+
+from aws_lambda_powertools.utilities.idempotency.exceptions import (
+ IdempotencyAlreadyInProgressError,
+ IdempotencyItemAlreadyExistsError,
+ IdempotencyItemNotFoundError,
+ IdempotencyPersistenceLayerError,
+)
+from aws_lambda_powertools.utilities.idempotency.idempotency import (
+ idempotent,
+ idempotent_function,
+)
+from aws_lambda_powertools.utilities.idempotency.persistence.redis import (
+ RedisCachePersistenceLayer,
+)
+
+pytest.skip(reason="Integration tests disabled for Redis Idempotency.", allow_module_level=True)
+
+
+@pytest.fixture
+def redis_container_image():
+ return "public.ecr.aws/docker/library/redis:7.2-alpine"
+
+
+@pytest.fixture
+def lambda_context():
+ class LambdaContext:
+ def __init__(self):
+ self.function_name = "test-func"
+ self.memory_limit_in_mb = 128
+ self.invoked_function_arn = "arn:aws:lambda:eu-west-1:809313241234:function:test-func"
+ self.aws_request_id = "52fdfc07-2182-154f-163f-5f0f9a621d72"
+
+ def get_remaining_time_in_millis(self) -> int:
+ return 1000
+
+ return LambdaContext()
+
+
+# test basic
+def test_idempotent_function_and_lambda_handler_redis_basic(
+ lambda_context,
+ redis_container_image,
+):
+ with RedisContainer(image=redis_container_image) as redis_container:
+ redis_client = redis_container.get_client()
+ mock_event = {"data": "value"}
+ persistence_layer = RedisCachePersistenceLayer(client=redis_client)
+ expected_result = {"message": "Foo"}
+
+ @idempotent_function(persistence_store=persistence_layer, data_keyword_argument="record")
+ def record_handler(record):
+ return expected_result
+
+ @idempotent(persistence_store=persistence_layer)
+ def lambda_handler(event, context):
+ return expected_result
+
+ # WHEN calling the function
+ fn_result = record_handler(record=mock_event)
+ # WHEN calling lambda handler
+ handler_result = lambda_handler(mock_event, lambda_context)
+ # THEN we expect the function and lambda handler to execute successfully
+ assert fn_result == expected_result
+ assert handler_result == expected_result
+
+
+def test_idempotent_function_and_lambda_handler_redis_cache(
+ lambda_context,
+ redis_container_image,
+):
+ with RedisContainer(image=redis_container_image) as redis_container:
+ redis_client = redis_container.get_client()
+ mock_event = {"data": "value2"}
+ persistence_layer = RedisCachePersistenceLayer(client=redis_client)
+ result = {"message": "Foo"}
+ expected_result = copy.deepcopy(result)
+
+ @idempotent_function(persistence_store=persistence_layer, data_keyword_argument="record")
+ def record_handler(record):
+ return result
+
+ @idempotent(persistence_store=persistence_layer)
+ def lambda_handler(event, context):
+ return result
+
+ # WHEN calling the function
+ fn_result = record_handler(record=mock_event)
+ # WHEN calling lambda handler
+ handler_result = lambda_handler(mock_event, lambda_context)
+ # THEN we expect the function and lambda handler to execute successfully
+ assert fn_result == expected_result
+ assert handler_result == expected_result
+
+ # modify the return value to check that the idempotency cache kicks in
+ result = {"message": "Bar"}
+ fn_result2 = record_handler(record=mock_event)
+ # second call to the lambda handler should return the same cached result
+ handler_result2 = lambda_handler(mock_event, lambda_context)
+ assert fn_result2 == expected_result
+ assert handler_result2 == expected_result
+
+ # modify the mock event to check that we get an updated result
+ mock_event = {"data": "value3"}
+ fn_result3 = record_handler(record=mock_event)
+ # third call to the lambda handler should return the updated result
+ handler_result3 = lambda_handler(mock_event, lambda_context)
+ assert fn_result3 == result
+ assert handler_result3 == result
+
+
+# test idem-inprogress
+def test_idempotent_lambda_redis_in_progress(
+ lambda_context,
+ redis_container_image,
+):
+ """
+ Test idempotent decorator where lambda_handler is already processing an event with matching event key
+ """
+ with RedisContainer(image=redis_container_image) as redis_container:
+ redis_client = redis_container.get_client()
+
+ mock_event = {"data": "value4"}
+ persistence_store = RedisCachePersistenceLayer(client=redis_client)
+ lambda_response = {"foo": "bar"}
+
+ @idempotent(persistence_store=persistence_store)
+ def lambda_handler(event, context):
+ return lambda_response
+
+ # run once to register the lambda context and configure the persistence store
+ lambda_handler(mock_event, lambda_context)
+ # save an additional in-progress record for a new payload
+ mock_event = {"data": "value7"}
+ try:
+ persistence_store.save_inprogress(mock_event, 10000)
+ except IdempotencyItemAlreadyExistsError:
+ pass
+
+ with pytest.raises(IdempotencyAlreadyInProgressError):
+ lambda_handler(mock_event, lambda_context)
+
+
+# test record removal
+def test_idempotent_lambda_redis_delete(
+ lambda_context,
+ redis_container_image,
+):
+ with RedisContainer(image=redis_container_image) as redis_container:
+ redis_client = redis_container.get_client()
+ mock_event = {"data": "test_delete"}
+ persistence_layer = RedisCachePersistenceLayer(client=redis_client)
+ result = {"message": "Foo"}
+
+ @idempotent(persistence_store=persistence_layer)
+ def lambda_handler(event, context):
+ return result
+
+ # the first run populates the function metadata needed for deletion;
+ # delete_record does not work until the function has run at least once (possible bug?)
+ lambda_handler(mock_event, lambda_context)
+ # delete any potentially stale data
+ persistence_layer.delete_record(mock_event, IdempotencyItemNotFoundError)
+ # run a second time to ensure a clean result
+ handler_result = lambda_handler(mock_event, lambda_context)
+ assert handler_result == result
+ persistence_layer.delete_record(mock_event, IdempotencyItemNotFoundError)
+ # after deleting the record, the handler should return the new result
+ result = {"message": "Foo2"}
+ handler_result2 = lambda_handler(mock_event, lambda_context)
+
+ assert handler_result2 == result
+
+
+def test_idempotent_lambda_redis_credential(lambda_context, redis_container_image):
+ with RedisContainer(image=redis_container_image) as redis_container:
+ redis_client = redis_container.get_client()
+
+ pwd = "terriblePassword"
+ usr = "test_acl_denial"
+ redis_client.acl_setuser(
+ username=usr,
+ enabled=True,
+ passwords="+" + pwd,
+ keys="*",
+ commands=["+hgetall", "-set"],
+ )
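+ # this ACL user may run HGETALL but not SET, so the persistence layer's writes are denied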
+ redis_client.auth(password=pwd, username=usr)
+
+ @idempotent(persistence_store=RedisCachePersistenceLayer(client=redis_client))
+ def lambda_handler(event, _):
+ return True
+
+ with pytest.raises(IdempotencyPersistenceLayerError):
+ lambda_handler("test_Acl", lambda_context)
diff --git a/tests/unit/test_shared_functions.py b/tests/unit/test_shared_functions.py
index 9232b72527..c8c4bb2afb 100644
--- a/tests/unit/test_shared_functions.py
+++ b/tests/unit/test_shared_functions.py
@@ -1,12 +1,14 @@
import os
import warnings
from dataclasses import dataclass
+from pathlib import Path
import pytest
from pydantic import BaseModel
from aws_lambda_powertools.shared import constants
from aws_lambda_powertools.shared.functions import (
+ abs_lambda_path,
extract_event_from_common_models,
powertools_debug_is_set,
powertools_dev_is_set,
@@ -19,6 +21,11 @@
from aws_lambda_powertools.utilities.parameters.base import DEFAULT_MAX_AGE_SECS
+@pytest.fixture
+def default_lambda_path():
+ return "/var/task"
+
+
def test_resolve_env_var_choice_explicit_wins_over_env_var():
assert resolve_truthy_env_var_choice(env="true", choice=False) is False
assert resolve_env_var_choice(env="something", choice=False) is False
@@ -138,3 +145,33 @@ def test_resolve_max_age_env_var_wins_over_default_value(monkeypatch: pytest.Mon
# THEN the result must be the environment variable value
assert max_age == 20
+
+
+def test_abs_lambda_path_empty():
+ # Given LAMBDA_TASK_ROOT is empty and no relative path is provided
+ os.environ["LAMBDA_TASK_ROOT"] = ""
+ # Then the path should be the current working directory
+ assert abs_lambda_path() == f"{Path.cwd()}"
+
+
+def test_abs_lambda_path_empty_envvar(default_lambda_path):
+ # Given LAMBDA_TASK_ROOT is set and no relative path is provided
+ os.environ["LAMBDA_TASK_ROOT"] = default_lambda_path
+ # Then the path should be the environment variable value
+ assert abs_lambda_path() == default_lambda_path
+
+
+def test_abs_lambda_path_w_filename():
+ # Given LAMBDA_TASK_ROOT is empty and a relative path is provided
+ relative_path = "cert/pub.cert"
+ os.environ["LAMBDA_TASK_ROOT"] = ""
+ # Then the path should be the current working directory joined with the relative path
+ assert abs_lambda_path(relative_path) == str(Path(Path.cwd(), relative_path))
+
+
+def test_abs_lambda_path_w_filename_envvar(default_lambda_path):
+ # Given LAMBDA_TASK_ROOT is set and a relative path is provided
+ relative_path = "cert/pub.cert"
+ os.environ["LAMBDA_TASK_ROOT"] = default_lambda_path
+ # Then the path should be LAMBDA_TASK_ROOT joined with the relative path
+ assert abs_lambda_path(relative_path="cert/pub.cert") == str(Path(os.environ["LAMBDA_TASK_ROOT"], relative_path))