Implement the option to create Azure NSS buckets using the new logs-enabled storage account (red-hat-storage#8707)

* Add azure-with-logs NSS support for the factory

Signed-off-by: Sagi Hirshfeld <[email protected]>
sagihirshfeld authored Nov 18, 2023
1 parent 0114bae commit 63d9468
Showing 7 changed files with 169 additions and 17 deletions.
3 changes: 2 additions & 1 deletion ocs_ci/ocs/constants.py
@@ -978,6 +978,7 @@
# Platforms
AWS_PLATFORM = "aws"
AZURE_PLATFORM = "azure"
AZURE_WITH_LOGS_PLATFORM = "azure-with-logs"
GCP_PLATFORM = "gcp"
VSPHERE_PLATFORM = "vsphere"
BAREMETAL_PLATFORM = "baremetal"
@@ -1863,7 +1864,7 @@
PRODUCTION_JOBS_PREFIX = ["jnk"]

# Cloud Manager available platforms
-CLOUD_MNGR_PLATFORMS = ["AWS", "GCP", "AZURE", "IBMCOS"]
+CLOUD_MNGR_PLATFORMS = ["AWS", "GCP", "AZURE", "AZURE_WITH_LOGS", "IBMCOS"]

# Vault related configurations
VAULT_VERSION_INFO_URL = "https://github.com/hashicorp/vault/releases/latest"
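For orientation, here is a minimal sketch of how the new platform constant is meant to be consumed; the dict layout mirrors the libtest added later in this commit, and the (1, None) tuple follows the factory's existing "(amount, region)" convention.

from ocs_ci.ocs import constants

# Inside a test that receives the bucket_factory fixture:
bucketclass_dict = {
    "interface": "OC",
    "namespace_policy_dict": {
        "type": "Single",
        "namespacestore_dict": {constants.AZURE_WITH_LOGS_PLATFORM: [(1, None)]},
    },
}
bucket = bucket_factory(bucketclass=bucketclass_dict)[0]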
65 changes: 65 additions & 0 deletions ocs_ci/ocs/resources/cloud_manager.py
@@ -40,6 +40,7 @@ def __init__(self):
"AWS": S3Client,
"GCP": GoogleClient,
"AZURE": AzureClient,
"AZURE_WITH_LOGS": AzureWithLogsClient,
"IBMCOS": S3Client,
"RGW": S3Client,
}
@@ -565,3 +566,67 @@ def create_azure_secret(self):
).decode("ascii")

return create_resource(**bs_secret_data)


class AzureWithLogsClient(AzureClient):
"""
Implementation of an Azure client that uses the Azure API
against an existing storage account with bucket logging enabled
"""

def __init__(
self, account_name=None, credential=None, auth_dict=None, *args, **kwargs
):
if auth_dict:
self.tenant_id = auth_dict.get("TENANT_ID")
self.app_id = auth_dict.get("APPLICATION_ID")
self.app_secret = auth_dict.get("APPLICATION_SECRET")
self.logs_analytics_workspace_id = auth_dict.get(
"LOGS_ANALYTICS_WORKSPACE_ID"
)
super().__init__(
account_name=account_name,
credential=credential,
auth_dict=auth_dict,
*args,
**kwargs,
)

def create_azure_secret(self):
"""
Create a Kubernetes secret to allow NooBaa to create Azure-based backingstores
Note that this method overrides the parent method to include the
additional fields that are needed for the bucket logs feature
"""
bs_secret_data = templating.load_yaml(constants.MCG_BACKINGSTORE_SECRET_YAML)
bs_secret_data["metadata"]["name"] = create_unique_resource_name(
"cldmgr-azure-logs", "secret"
)
bs_secret_data["metadata"]["namespace"] = config.ENV_DATA["cluster_namespace"]
bs_secret_data["data"]["AccountKey"] = base64.urlsafe_b64encode(
self.credential.encode("UTF-8")
).decode("ascii")
bs_secret_data["data"]["AccountName"] = base64.urlsafe_b64encode(
self.account_name.encode("UTF-8")
).decode("ascii")

# Note that the following encodings are in plain base64, and not urlsafe.
# This is because the urlsafe encoding of these credentials might contain
# characters that are not accepted when creating the secret.
bs_secret_data["data"]["TenantID"] = base64.b64encode(
self.tenant_id.encode("UTF-8")
).decode("ascii")
bs_secret_data["data"]["ApplicationID"] = base64.b64encode(
self.app_id.encode("UTF-8")
).decode("ascii")
bs_secret_data["data"]["ApplicationSecret"] = base64.b64encode(
self.app_secret.encode("UTF-8")
).decode("ascii")
bs_secret_data["data"]["LogsAnalyticsWorkspaceID"] = base64.b64encode(
self.logs_analytics_workspace_id.encode("UTF-8")
).decode("ascii")

return create_resource(**bs_secret_data)
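A rough, hedged sketch of exercising the new client directly. The TENANT_ID, APPLICATION_ID, APPLICATION_SECRET and LOGS_ANALYTICS_WORKSPACE_ID keys are the ones read by __init__ above; the account_name and credential arguments are assumed to be handled by the parent AzureClient, and the placeholder values stand in for whatever the ocs-ci AUTH configuration provides.

from ocs_ci.ocs.resources.cloud_manager import AzureWithLogsClient

auth_dict = {
    "TENANT_ID": "<tenant-id>",
    "APPLICATION_ID": "<application-id>",
    "APPLICATION_SECRET": "<application-secret>",
    "LOGS_ANALYTICS_WORKSPACE_ID": "<workspace-id>",
}
client = AzureWithLogsClient(
    account_name="<storage-account>",
    credential="<account-key>",
    auth_dict=auth_dict,
)
# Creates a Kubernetes secret that carries the extra log-related fields
secret_obj = client.create_azure_secret()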
3 changes: 3 additions & 0 deletions ocs_ci/ocs/resources/cloud_uls.py
@@ -25,6 +25,7 @@ def cloud_uls_factory(request, cld_mgr):
"aws": set(),
"gcp": set(),
"azure": set(),
"azure-with-logs": set(),
"ibmcos": set(),
"rgw": set(),
}
@@ -33,6 +34,7 @@ def cloud_uls_factory(request, cld_mgr):
"aws": cld_mgr.aws_client,
"gcp": cld_mgr.gcp_client,
"azure": cld_mgr.azure_client,
"azure-with-logs": cld_mgr.azure_with_logs_client,
"ibmcos": cld_mgr.ibmcos_client,
}
except AttributeError as e:
@@ -66,6 +68,7 @@ def _create_uls(uls_dict):
"aws": set(),
"gcp": set(),
"azure": set(),
"azure-with-logs": set(),
"ibmcos": set(),
"rgw": set(),
}
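As a hedged illustration, a test that needs an underlying bucket on the new platform would request it from the factory roughly like this, assuming the factory's existing "(amount, region)" tuple convention and that it returns the per-platform name sets it tracks.

# Inside a test that receives the cloud_uls_factory fixture:
uls_dict = {"azure-with-logs": [(1, None)]}  # one ULS, default size/region
created_uls = cloud_uls_factory(uls_dict)
azure_logs_uls_name = list(created_uls["azure-with-logs"])[0]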
66 changes: 53 additions & 13 deletions ocs_ci/ocs/resources/mcg_replication_policy.py
@@ -1,11 +1,10 @@
from abc import ABC, abstractmethod
import uuid


class McgReplicationPolicy:
"""
A class representing an MCG bucket replication policy.
-This class handles the parsing of the relevant parameters to a dictionary that matches the expected JSON structure.
+A class to handle the MCG bucket replication policy JSON structure.
"""

@@ -29,33 +28,74 @@ def __str__(self) -> str:
return str(self.to_dict())


-class LogBasedReplicationPolicy(McgReplicationPolicy):
+class LogBasedReplicationPolicy(McgReplicationPolicy, ABC):
"""
-A subclass of ReplicationPolicy that includes log-based replication information.
+An abstract subclass of ReplicationPolicy that includes log-based replication information.
"""

def __init__(
self,
destination_bucket,
sync_deletions=False,
logs_bucket="",
prefix="",
logs_location_prefix="",
):
super().__init__(destination_bucket, prefix)
self.sync_deletions = sync_deletions

@abstractmethod
def to_dict(self):
dict = super().to_dict()
dict["rules"][0]["sync_deletions"] = self.sync_deletions
dict["log_replication_info"] = {}

return dict


class AwsLogBasedReplicationPolicy(LogBasedReplicationPolicy):
"""
A class to handle the AWS log-based bucket replication policy JSON structure.
"""

def __init__(
self,
destination_bucket,
sync_deletions=False,
logs_bucket="",
prefix="",
logs_location_prefix="",
):
super().__init__(destination_bucket, sync_deletions, prefix)
self.logs_bucket = logs_bucket
self.logs_location_prefix = logs_location_prefix

def to_dict(self):
dict = super().to_dict()
dict["rules"][0]["sync_deletions"] = self.sync_deletions
dict["log_replication_info"] = {
"logs_location": {
"logs_bucket": self.logs_bucket,
"prefix": self.logs_location_prefix,
}
dict["log_replication_info"]["logs_location"] = {
"logs_bucket": self.logs_bucket,
"prefix": self.logs_location_prefix,
}

return dict


class AzureLogBasedReplicationPolicy(LogBasedReplicationPolicy):
"""
A class to handle the Azure log-based bucket replication policy JSON structure.
"""

def __init__(
self,
destination_bucket,
sync_deletions=False,
prefix="",
):
super().__init__(destination_bucket, sync_deletions, prefix)

def to_dict(self):
dict = super().to_dict()
dict["log_replication_info"]["endpoint_type"] = "AZURE"

return dict
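To make the split concrete, an illustrative sketch of what the two new subclasses serialize to; the contents of rules[0] coming from the base McgReplicationPolicy are abbreviated as "...", and only the fields set in the code above are spelled out.

aws_policy = AwsLogBasedReplicationPolicy(
    destination_bucket="target-bucket",
    sync_deletions=True,
    logs_bucket="logs-bucket",
)
# aws_policy.to_dict() is roughly:
# {"rules": [{..., "sync_deletions": True}],
#  "log_replication_info": {"logs_location": {"logs_bucket": "logs-bucket", "prefix": ""}}}

azure_policy = AzureLogBasedReplicationPolicy(
    destination_bucket="target-bucket",
    sync_deletions=True,
)
# azure_policy.to_dict() is roughly:
# {"rules": [{..., "sync_deletions": True}],
#  "log_replication_info": {"endpoint_type": "AZURE"}}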
17 changes: 17 additions & 0 deletions ocs_ci/ocs/resources/namespacestore.py
@@ -226,6 +226,11 @@ def cli_create_namespacestore(
f"--account-name {get_attr_chain(cld_mgr, 'azure_client.account_name')} "
f"--target-blob-container {uls_name}"
),
constants.AZURE_WITH_LOGS_PLATFORM: lambda: (
f"azure-blob {nss_name} "
f"--secret-name {get_attr_chain(cld_mgr, 'azure_with_logs_client.secret.name')} "
f"--target-blob-container {uls_name}"
),
constants.RGW_PLATFORM: lambda: (
f"s3-compatible {nss_name} "
f"--endpoint {get_attr_chain(cld_mgr, 'rgw_client.endpoint')} "
@@ -301,6 +306,18 @@ def oc_create_namespacestore(
},
},
},
constants.AZURE_WITH_LOGS_PLATFORM: lambda: {
"type": "azure-blob",
"azureBlob": {
"targetBlobContainer": uls_name,
"secret": {
"name": get_attr_chain(
cld_mgr, "azure_with_logs_client.secret.name"
),
"namespace": nss_data["metadata"]["namespace"],
},
},
},
constants.RGW_PLATFORM: lambda: {
"type": "s3-compatible",
"s3Compatible": {
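For reference, the OC branch above ends up filling the NamespaceStore spec roughly as follows; the angle-bracket values are placeholders, the surrounding template supplies the remaining fields, and the assignment into nss_data["spec"] is an assumption based on the lambda's return value.

nss_data["spec"] = {
    "type": "azure-blob",
    "azureBlob": {
        "targetBlobContainer": "<uls-name>",
        "secret": {
            "name": "<cldmgr-azure-logs-secret>",  # generated by create_azure_secret()
            "namespace": "<cluster-namespace>",
        },
    },
}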
26 changes: 26 additions & 0 deletions tests/libtest/test_azure_with_logs_infra.py
@@ -0,0 +1,26 @@
from ocs_ci.ocs import constants
from ocs_ci.framework.pytest_customization.marks import libtest
from ocs_ci.ocs.resources.mcg_replication_policy import AzureLogBasedReplicationPolicy
from ocs_ci.ocs.bucket_utils import bucket_read_api


@libtest
def test_azure_logs_based_repli_setup(bucket_factory, mcg_obj_session):
target_bucket = bucket_factory()[0].name
bucketclass_dict = {
"interface": "OC",
"namespace_policy_dict": {
"type": "Single",
"namespacestore_dict": {constants.AZURE_WITH_LOGS_PLATFORM: [(1, None)]},
},
}
replication_policy = AzureLogBasedReplicationPolicy(
destination_bucket=target_bucket,
sync_deletions=True,
)
source_bucket = bucket_factory(
bucketclass=bucketclass_dict, replication_policy=replication_policy
)[0].name

response = bucket_read_api(mcg_obj_session, source_bucket)
assert "replication_policy_id" in response
6 changes: 3 additions & 3 deletions tests/manage/mcg/test_log_based_bucket_replication.py
@@ -22,7 +22,7 @@
tier4b,
)
from ocs_ci.ocs.resources.pod import get_noobaa_pods, get_pod_node
-from ocs_ci.ocs.resources.mcg_replication_policy import LogBasedReplicationPolicy
+from ocs_ci.ocs.resources.mcg_replication_policy import AwsLogBasedReplicationPolicy
from ocs_ci.ocs.scale_noobaa_lib import noobaa_running_node_restart

logger = logging.getLogger(__name__)
@@ -98,7 +98,7 @@ def log_based_replication_setup(
platform=constants.AWS_PLATFORM,
region=self.DEFAULT_AWS_REGION,
)
-replication_policy = LogBasedReplicationPolicy(
+replication_policy = AwsLogBasedReplicationPolicy(
destination_bucket=target_bucket.name,
sync_deletions=True,
logs_bucket=mockup_logger.logs_bucket_uls_name,
@@ -218,7 +218,7 @@ def test_patch_deletion_sync_to_existing_bucket(
platform=constants.AWS_PLATFORM,
region=self.DEFAULT_AWS_REGION,
)
-replication_policy = LogBasedReplicationPolicy(
+replication_policy = AwsLogBasedReplicationPolicy(
destination_bucket=target_bucket.name,
sync_deletions=True,
logs_bucket=mockup_logger.logs_bucket_uls_name,
