diff --git a/Makefile b/Makefile
index 33f0bbc..bb7ef9d 100644
--- a/Makefile
+++ b/Makefile
@@ -3,7 +3,7 @@ HOSTNAME=anypoint.mulesoft.com
NAMESPACE=automation
NAME=anypoint
BINARY=terraform-provider-${NAME}
-VERSION=1.6.1-SNAPSHOT
+VERSION=1.6.2-SNAPSHOT
OS_ARCH=darwin_amd64
default: install
diff --git a/anypoint/data_source_apim_instance.go b/anypoint/data_source_apim_instance.go
index 0baa6ab..d0d8dbf 100644
--- a/anypoint/data_source_apim_instance.go
+++ b/anypoint/data_source_apim_instance.go
@@ -748,7 +748,7 @@ func flattenApimAudit(audit *apim.Audit) map[string]interface{} {
}
}
if updated, ok := audit.GetUpdatedOk(); ok && updated != nil {
- if val, ok := updated.GetDateOk(); ok && updated != nil {
+ if val, ok := updated.GetDateOk(); ok {
result["updated"] = *val
}
}
diff --git a/anypoint/data_source_apim_instance_policy.go b/anypoint/data_source_apim_instance_policy.go
index c2d0a91..cbd4aae 100644
--- a/anypoint/data_source_apim_instance_policy.go
+++ b/anypoint/data_source_apim_instance_policy.go
@@ -205,7 +205,7 @@ func flattenApimInstancePolicyAudit(audit *apim_policy.Audit) map[string]interfa
}
}
if updated, ok := audit.GetUpdatedOk(); ok && updated != nil {
- if val, ok := updated.GetDateOk(); ok && updated != nil {
+ if val, ok := updated.GetDateOk(); ok {
result["updated"] = val
}
}
diff --git a/anypoint/data_source_apim_instance_upstreams.go b/anypoint/data_source_apim_instance_upstreams.go
index 4d80e9d..5fa01d3 100644
--- a/anypoint/data_source_apim_instance_upstreams.go
+++ b/anypoint/data_source_apim_instance_upstreams.go
@@ -216,7 +216,7 @@ func flattenApimUpstreamAudit(audit *apim_upstream.Audit) map[string]interface{}
}
}
if updated, ok := audit.GetUpdatedOk(); ok && updated != nil {
- if val, ok := updated.GetDateOk(); ok && updated != nil {
+ if val, ok := updated.GetDateOk(); ok {
result["updated"] = val.String()
}
}
diff --git a/anypoint/data_source_app_deployment_v2.go b/anypoint/data_source_app_deployment_v2.go
new file mode 100644
index 0000000..ac15800
--- /dev/null
+++ b/anypoint/data_source_app_deployment_v2.go
@@ -0,0 +1,1061 @@
+package anypoint
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "maps"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ application_manager_v2 "github.com/mulesoft-anypoint/anypoint-client-go/application_manager_v2"
+)
+
+var ReplicasReadOnlyDefinition = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "id": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The unique id of the mule app replica.",
+ },
+ "state": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The current state of the replica.",
+ },
+ "deployment_location": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The node id in which the replica is deployed.",
+ },
+ "current_deployment_version": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The version deployed in the replica.",
+ },
+ "reason": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "In case of an error, it should provide information about the root cause.",
+ },
+ },
+}
+
+var DeplApplicationRefReadOnlyDefinition = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "group_id": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The groupId of the application.",
+ },
+ "artifact_id": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The artifactId of the application.",
+ },
+ "version": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The version of the application.",
+ },
+ "packaging": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The packaging of the application.",
+ },
+ },
+}
+
+var DeplApplicationConfigPropsReadOnlyDefinition = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "application_name": {
+ Type: schema.TypeString,
+ Description: "The application name",
+ Computed: true,
+ },
+ "properties": {
+ Type: schema.TypeMap,
+ Description: "The mule application properties.",
+ Computed: true,
+ },
+ "secure_properties": {
+ Type: schema.TypeMap,
+ Description: "The mule application secured properties.",
+ Computed: true,
+ },
+ },
+}
+
+var DeplApplicationConfigLoggingReadOnlyDefinition = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "artifact_name": {
+ Type: schema.TypeString,
+ Description: "The application name.",
+ Computed: true,
+ },
+ "scope_logging_configurations": {
+ Type: schema.TypeList,
+ Description: "Additional log levels and categories to include in logs.",
+ Computed: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "scope": {
+ Type: schema.TypeString,
+ Description: "The logging package scope",
+ Computed: true,
+ },
+ "log_level": {
+ Type: schema.TypeString,
+ Description: "The application log level: INFO / DEBUG / WARNING / ERROR / FATAL",
+ Computed: true,
+ },
+ },
+ },
+ },
+ },
+}
+
+var DeplApplicationConfigSchedulingReadOnlyDefinition = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "application_name": {
+ Type: schema.TypeString,
+ Description: "The mule application name.",
+ Computed: true,
+ },
+ "schedulers": {
+ Type: schema.TypeList,
+ Description: "The mule app schedulers details",
+ Computed: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "name": {
+ Type: schema.TypeString,
+ Description: "The scheduler name",
+ Computed: true,
+ },
+ "type": {
+ Type: schema.TypeString,
+ Description: "The scheduler type",
+ Computed: true,
+ },
+ "flow_name": {
+ Type: schema.TypeString,
+ Description: "The scheduler flow name",
+ Computed: true,
+ },
+ "enabled": {
+ Type: schema.TypeBool,
+ Description: "Whether the scheduler is enabled or not.",
+ Computed: true,
+ },
+ "time_unit": {
+ Type: schema.TypeString,
+ Description: "The scheduler's time unit.",
+ Computed: true,
+ },
+ "frequency": {
+ Type: schema.TypeString,
+ Description: "The scheduler's frequency",
+ Computed: true,
+ },
+ "start_delay": {
+ Type: schema.TypeString,
+ Description: "The scheduler's start delay",
+ Computed: true,
+ },
+ "expression": {
+ Type: schema.TypeString,
+ Description: "The scheduler's cron expression",
+ Computed: true,
+ },
+ "time_zone": {
+ Type: schema.TypeString,
+ Description: "The scheduler's time zone",
+ Computed: true,
+ },
+ },
+ },
+ },
+ },
+}
+
+var DeplApplicationConfigReadOnlyDefinition = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "mule_agent_app_props_service": {
+ Type: schema.TypeList,
+ Description: "The mule app properties",
+ Elem: DeplApplicationConfigPropsReadOnlyDefinition,
+ Computed: true,
+ },
+ "mule_agent_logging_service": {
+ Type: schema.TypeList,
+ Description: "The mule app logging props",
+ Elem: DeplApplicationConfigLoggingReadOnlyDefinition,
+ Computed: true,
+ },
+ "mule_agent_scheduling_service": {
+ Type: schema.TypeList,
+ Description: "The mule app scheduling",
+ Elem: DeplApplicationConfigSchedulingReadOnlyDefinition,
+ Computed: true,
+ },
+ },
+}
+
+var DeplApplicationReadOnlyDefinition = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "status": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The status of the application.",
+ },
+ "desired_state": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The desired state of the application.",
+ },
+ "ref": {
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "The reference to the deployed artifact on exchange.",
+ Elem: DeplApplicationRefReadOnlyDefinition,
+ },
+ "configuration": {
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "The configuration of the application.",
+ Elem: DeplApplicationConfigReadOnlyDefinition,
+ },
+ "vcores": {
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "The allocated virtual cores.",
+ },
+ "object_store_v2_enabled": {
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Whether object store v2 is enabled.",
+ },
+ },
+}
+
+var DeplTargetDeplSettHttpReadOnlyDefinition = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "inbound_public_url": {
+ Type: schema.TypeString,
+ Description: "The inbound public url",
+ Computed: true,
+ },
+ "inbound_path_rewrite": {
+ Type: schema.TypeString,
+ Description: "The inbound path rewrite",
+ Computed: true,
+ },
+ "inbound_last_mile_security": {
+ Type: schema.TypeBool,
+ Description: "Last-mile security means that the connection between ingress and the actual Mule app will be HTTPS.",
+ Computed: true,
+ },
+ "inbound_forward_ssl_session": {
+ Type: schema.TypeBool,
+ Description: "Whether to forward the ssl session",
+ Computed: true,
+ },
+ "inbound_internal_url": {
+ Type: schema.TypeString,
+ Description: "The inbound internal url",
+ Computed: true,
+ },
+ "inbound_unique_id": {
+ Type: schema.TypeString,
+ Description: "The inbound unique id",
+ Computed: true,
+ },
+ },
+}
+
+var DeplTargetDeplSettRuntimeReadOnlyDefinition = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "version": {
+ Type: schema.TypeString,
+ Description: `
+ On deployment operations it can be set to:
+ - a full image version with tag (i.e "4.6.0:40e-java17"),
+ - a base version with a partial tag not indicating the java version (i.e. "4.6.0:40")
+ - or only a base version (i.e. "4.6.0").
+ Defaults to the latest image version.
+ This field has precedence over the legacy 'target.deploymentSettings.runtimeVersion'
+ Learn more about Mule runtime release notes [here](https://docs.mulesoft.com/release-notes/runtime-fabric/runtime-fabric-runtimes-release-notes)
+ `,
+ Computed: true,
+ },
+ "release_channel": {
+ Type: schema.TypeString,
+ Description: `
+ On deployment operations it can be set to one of:
+ - "LTS"
+ - "EDGE"
+ - "LEGACY".
+ Defaults to "EDGE". This field has precedence over the legacy 'target.deploymentSettings.runtimeReleaseChannel'.
+ Learn more on release channels [here](https://docs.mulesoft.com/release-notes/mule-runtime/lts-edge-release-cadence).
+ `,
+ Computed: true,
+ },
+ "java": {
+ Type: schema.TypeString,
+ Description: `
+ On deployment operations it can be set to one of:
+ - "8"
+ - "17"
+ Defaults to "8".
+ Learn more about Java support [here](https://docs.mulesoft.com/general/java-support).
+ `,
+ Computed: true,
+ },
+ },
+}
+
+var DeplTargetDeplSettAutoscalingReadOnlyDefinition = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "enabled": {
+ Type: schema.TypeBool,
+ Description: "Enables or disables the Autoscaling feature. The possible values are: true or false.",
+ Computed: true,
+ },
+ "min_replicas": {
+ Type: schema.TypeInt,
+ Description: "Set the minimum amount of replicas for your deployment. The minimum accepted value is 1. The maximum is 3.",
+ Computed: true,
+ },
+ "max_replicas": {
+ Type: schema.TypeInt,
+ Description: "Set the maximum amount of replicas your application can scale to. The minimum accepted value is 2. The maximum is 32.",
+ Computed: true,
+ },
+ },
+}
+
+var DeplTargetDeplSettResourcesReadOnlyDefinition = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "cpu_limit": {
+ Type: schema.TypeString,
+ Description: "The CPU limit",
+ Computed: true,
+ },
+ "cpu_reserved": {
+ Type: schema.TypeString,
+ Description: "The CPU reserved",
+ Computed: true,
+ },
+ "memory_limit": {
+ Type: schema.TypeString,
+ Description: "The memory limit",
+ Computed: true,
+ },
+ "memory_reserved": {
+ Type: schema.TypeString,
+ Description: "The memory reserved",
+ Computed: true,
+ },
+ "storage_limit": {
+ Type: schema.TypeString,
+ Description: "The storage limit",
+ Computed: true,
+ },
+ "storage_reserved": {
+ Type: schema.TypeString,
+ Description: "The storage reserved",
+ Computed: true,
+ },
+ },
+}
+
+var DeplTargetDeplSettSidecarsReadOnlyDefinition = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "anypoint_monitoring_image": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "anypoint_monitoring_resources_cpu_limit": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "anypoint_monitoring_resources_cpu_reserved": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "anypoint_monitoring_resources_memory_limit": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "anypoint_monitoring_resources_memory_reserved": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+}
+
+var DeplTargetDeploymentSettingsReadOnlyDefinition = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "clustered": {
+ Type: schema.TypeBool,
+ Description: "Whether the application is deployed in clustered mode.",
+ Computed: true,
+ },
+ "enforce_deploying_replicas_across_nodes": {
+ Type: schema.TypeBool,
+ Description: "If true, forces the deployment of replicas across the RTF cluster. This option only available for Runtime Fabrics.",
+ Computed: true,
+ },
+ "http": {
+ Type: schema.TypeList,
+ Description: "The details about http inbound or outbound configuration",
+ Computed: true,
+ Elem: DeplTargetDeplSettHttpReadOnlyDefinition,
+ },
+ "jvm_args": {
+ Type: schema.TypeString,
+ Description: "The java virtual machine arguments",
+ Computed: true,
+ },
+ "runtime": {
+ Type: schema.TypeList,
+ Description: "The Mule app runtime version info.",
+ Computed: true,
+ Elem: DeplTargetDeplSettRuntimeReadOnlyDefinition,
+ },
+ "autoscaling": {
+ Type: schema.TypeList,
+ Description: `
+ Use this object to provide CPU Based Horizontal Autoscaling configuration on deployment and redeployment operations. This object is optional.
+ If Autoscaling is disabled and the fields "minReplicas" and "maxReplicas" are provided, they must match the value of "target.replicas" field.
+ Learn more about Autoscaling [here](https://docs.mulesoft.com/cloudhub-2/ch2-configure-horizontal-autoscaling).
+ `,
+ Computed: true,
+ Elem: DeplTargetDeplSettAutoscalingReadOnlyDefinition,
+ },
+ "update_strategy": {
+ Type: schema.TypeString,
+ Description: "The mule app update strategy: rolling or recreate",
+ Computed: true,
+ },
+ "resources": {
+ Type: schema.TypeList,
+ Description: "The mule app allocated resources",
+ Elem: DeplTargetDeplSettResourcesReadOnlyDefinition,
+ Computed: true,
+ },
+ "last_mile_security": {
+ Type: schema.TypeBool,
+ Description: "Whether last mile security is active",
+ Computed: true,
+ },
+ "disable_am_log_forwarding": {
+ Type: schema.TypeBool,
+ Description: "Whether log forwarding is disabled.",
+ Computed: true,
+ },
+ "persistent_object_store": {
+ Type: schema.TypeBool,
+ Description: "Whether persistent object store is enabled.",
+ Computed: true,
+ },
+ "anypoint_monitoring_scope": {
+ Type: schema.TypeString,
+ Description: "The anypoint moniroting scope",
+ Computed: true,
+ },
+ "sidecars": {
+ Type: schema.TypeList,
+ Description: "The mule app sidecars.",
+ Elem: DeplTargetDeplSettSidecarsReadOnlyDefinition,
+ Computed: true,
+ },
+ "forward_ssl_session": {
+ Type: schema.TypeBool,
+ Description: "Whether the ssl session is forwarded to the mule app.",
+ Computed: true,
+ },
+ "disable_external_log_forwarding": {
+ Type: schema.TypeBool,
+ Description: "Whether the log forwarding is disabled.",
+ Computed: true,
+ },
+ "tracing_enabled": {
+ Type: schema.TypeBool,
+ Description: "Whether the log tracing is enabled.",
+ Computed: true,
+ },
+ "generate_default_public_url": {
+ Type: schema.TypeBool,
+ Description: "Whether default public url should be generated",
+ Computed: true,
+ },
+ },
+}
+
+var DeplTargetReadOnlyDefinition = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "provider": {
+ Type: schema.TypeString,
+ Description: "The cloud provider the target belongs to.",
+ Computed: true,
+ },
+ "target_id": {
+ Type: schema.TypeString,
+ Description: "The unique identifier of the target.",
+ Computed: true,
+ },
+ "deployment_settings": {
+ Type: schema.TypeList,
+ Description: "The settings of the target for the deployment to perform.",
+ Elem: DeplTargetDeploymentSettingsReadOnlyDefinition,
+ Computed: true,
+ },
+ "replicas": {
+ Type: schema.TypeInt,
+ Description: "The number of replicas",
+ Computed: true,
+ },
+ },
+}
+
+func dataSourceAppDeploymentV2() *schema.Resource {
+ return &schema.Resource{
+ ReadContext: dataSourceAppDeploymentV2Read,
+ Description: `
+ Reads a specific ` + "`" + `Deployment` + "`" + `.
+ This only works for Cloudhub V2 and Runtime Fabrics Apps.
+ `,
+ Schema: map[string]*schema.Schema{
+ "id": {
+ Type: schema.TypeString,
+ Required: true,
+ Description: "The unique id of the mule app deployment in the platform.",
+ },
+ "org_id": {
+ Type: schema.TypeString,
+ Required: true,
+ Description: "The organization where the mule app is deployed.",
+ },
+ "env_id": {
+ Type: schema.TypeString,
+ Required: true,
+ Description: "The environment where mule app is deployed.",
+ },
+ "name": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The name of the deployed mule app.",
+ },
+ "creation_date": {
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "The creation date of the mule app.",
+ },
+ "last_modified_date": {
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "The last modification date of the mule app.",
+ },
+ "desired_version": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The deployment desired version of the mule app.",
+ },
+ "replicas": {
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Data of the mule app replicas",
+ Elem: ReplicasReadOnlyDefinition,
+ },
+ "status": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Data of the mule app replicas",
+ },
+ "application": {
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "The details of the application to deploy",
+ Elem: DeplApplicationReadOnlyDefinition,
+ },
+ "target": {
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "The details of the target to perform the deployment on.",
+ Elem: DeplTargetReadOnlyDefinition,
+ },
+ "last_successful_version": {
+ Type: schema.TypeString,
+ Description: "The last successfully deployed version",
+ Computed: true,
+ },
+ },
+ }
+}
+
+func dataSourceAppDeploymentV2Read(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+ var diags diag.Diagnostics
+ pco := m.(ProviderConfOutput)
+ id := d.Get("id").(string)
+ orgid := d.Get("org_id").(string)
+ envid := d.Get("env_id").(string)
+ authctx := getAppDeploymentV2AuthCtx(ctx, &pco)
+	//execute request
+ res, httpr, err := pco.appmanagerclient.DefaultApi.GetDeploymentById(authctx, orgid, envid, id).Execute()
+ if err != nil {
+ var details string
+ if httpr != nil && httpr.StatusCode >= 400 {
+ defer httpr.Body.Close()
+ b, _ := io.ReadAll(httpr.Body)
+ details = string(b)
+ } else {
+ details = err.Error()
+ }
+ diags = append(diags, diag.Diagnostic{
+ Severity: diag.Error,
+ Summary: "Unable to get deployment for org " + orgid + " and env " + envid + " with id " + id,
+ Detail: details,
+ })
+ return diags
+ }
+ defer httpr.Body.Close()
+ //process data
+ data := flattenAppDeploymentV2(res)
+ if err := setAppDeploymentV2AttributesToResourceData(d, data); err != nil {
+ diags := append(diags, diag.Diagnostic{
+ Severity: diag.Error,
+ Summary: "Unable to set App Deployment details attributes",
+ Detail: err.Error(),
+ })
+ return diags
+ }
+ d.SetId(res.GetId())
+ return diags
+}
+
+func flattenAppDeploymentV2(deployment *application_manager_v2.Deployment) map[string]interface{} {
+ item := make(map[string]interface{})
+ if val, ok := deployment.GetNameOk(); ok {
+ item["name"] = *val
+ }
+ if val, ok := deployment.GetCreationDateOk(); ok {
+ item["creation_date"] = *val
+ }
+ if val, ok := deployment.GetLastModifiedDateOk(); ok {
+ item["last_modified_date"] = *val
+ }
+ if val, ok := deployment.GetDesiredVersionOk(); ok {
+ item["desired_version"] = *val
+ }
+ if val, ok := deployment.GetReplicasOk(); ok {
+ item["replicas"] = flattenAppDeploymentV2Replicas(val)
+ }
+ if val, ok := deployment.GetStatusOk(); ok {
+ item["status"] = *val
+ }
+ if application, ok := deployment.GetApplicationOk(); ok {
+ item["application"] = []interface{}{flattenAppDeploymentV2Application(application)}
+ }
+ if target, ok := deployment.GetTargetOk(); ok {
+ item["target"] = []interface{}{flattenAppDeploymentV2Target(target)}
+ }
+ if val, ok := deployment.GetLastSuccessfulVersionOk(); ok && val != nil {
+ item["last_successful_version"] = *val
+ }
+
+ return item
+}
+
+// Flattens the replicas array. Only includes replicas with id in the final result.
+func flattenAppDeploymentV2Replicas(replicas []application_manager_v2.Replicas) []interface{} {
+ res := make([]interface{}, 0)
+ for _, replica := range replicas {
+ if replica.HasId() {
+ res = append(res, flattenAppDeploymentV2Replica(&replica))
+ }
+ }
+ return res
+}
+
+// maps a replica object
+func flattenAppDeploymentV2Replica(replica *application_manager_v2.Replicas) map[string]interface{} {
+ item := make(map[string]interface{})
+ if val, ok := replica.GetIdOk(); ok {
+ item["id"] = *val
+ }
+ if val, ok := replica.GetStateOk(); ok {
+ item["state"] = *val
+ }
+ if val, ok := replica.GetDeploymentLocationOk(); ok {
+ item["deployment_location"] = *val
+ }
+ if val, ok := replica.GetCurrentDeploymentVersionOk(); ok {
+ item["current_deployment_version"] = *val
+ }
+ if val, ok := replica.GetReasonOk(); ok {
+ item["reason"] = *val
+ }
+ return item
+}
+
+func flattenAppDeploymentV2Application(application *application_manager_v2.Application) map[string]interface{} {
+ item := make(map[string]interface{})
+ if val, ok := application.GetStatusOk(); ok {
+ item["status"] = *val
+ }
+ if val, ok := application.GetDesiredStateOk(); ok {
+ item["desired_state"] = *val
+ }
+ if ref, ok := application.GetRefOk(); ok {
+ item["ref"] = []interface{}{flattenAppDeploymentV2Ref(ref)}
+ }
+ if config, ok := application.GetConfigurationOk(); ok {
+ item["configuration"] = []interface{}{flattenAppDeploymentV2Config(config)}
+ }
+ if val, ok := application.GetVCoresOk(); ok {
+		item["vcores"] = RoundFloat64(float64(*val), 1) // Ensures the value is 0.1 and not 0.10000000149011612, for example
+ }
+ if integrations, ok := application.GetIntegrationsOk(); ok {
+ data := flattenAppDeploymentV2Integrations(integrations)
+ maps.Copy(item, data)
+ }
+ return item
+}
+
+func flattenAppDeploymentV2Target(target *application_manager_v2.Target) map[string]interface{} {
+ item := make(map[string]interface{})
+ if val, ok := target.GetProviderOk(); ok {
+ item["provider"] = *val
+ }
+ if val, ok := target.GetTargetIdOk(); ok {
+ item["target_id"] = *val
+ }
+ if deployment_settings, ok := target.GetDeploymentSettingsOk(); ok {
+ item["deployment_settings"] = []interface{}{flattenAppDeploymentV2TargetDeplSett(deployment_settings)}
+ }
+ if val, ok := target.GetReplicasOk(); ok {
+ item["replicas"] = *val
+ }
+ return item
+}
+
+func flattenAppDeploymentV2Ref(ref *application_manager_v2.Ref) map[string]interface{} {
+ item := make(map[string]interface{})
+ if val, ok := ref.GetGroupIdOk(); ok {
+ item["group_id"] = *val
+ }
+ if val, ok := ref.GetArtifactIdOk(); ok {
+ item["artifact_id"] = *val
+ }
+ if val, ok := ref.GetVersionOk(); ok {
+ item["version"] = *val
+ }
+ if val, ok := ref.GetPackagingOk(); ok {
+ item["packaging"] = *val
+ }
+ return item
+}
+
+func flattenAppDeploymentV2Config(config *application_manager_v2.AppConfiguration) map[string]interface{} {
+ item := make(map[string]interface{})
+ if srv, ok := config.GetMuleAgentApplicationPropertiesServiceOk(); ok {
+ item["mule_agent_app_props_service"] = []interface{}{flattenAppDeploymentV2ConfigMAAPS(srv)}
+ }
+ if srv, ok := config.GetMuleAgentLoggingServiceOk(); ok {
+ item["mule_agent_logging_service"] = []interface{}{flattenAppDeploymentV2ConfigMALS(srv)}
+ }
+ if srv, ok := config.GetMuleAgentSchedulingServiceOk(); ok {
+ item["mule_agent_scheduling_service"] = []interface{}{flattenAppDeploymentV2ConfigMASS(srv)}
+ }
+ return item
+}
+
+func flattenAppDeploymentV2ConfigMAAPS(service *application_manager_v2.MuleAgentAppPropService) map[string]interface{} {
+ item := make(map[string]interface{})
+ if val, ok := service.GetApplicationNameOk(); ok {
+ item["application_name"] = *val
+ }
+ if val, ok := service.GetPropertiesOk(); ok {
+ item["properties"] = val
+ }
+ if val, ok := service.GetSecurePropertiesOk(); ok {
+ item["secure_properties"] = val
+ }
+ return item
+}
+
+func flattenAppDeploymentV2ConfigMALS(service *application_manager_v2.MuleAgentLoggingService) map[string]interface{} {
+ item := make(map[string]interface{})
+ if val, ok := service.GetArtifactNameOk(); ok {
+ item["artifact_name"] = *val
+ }
+ if scope_logging_conf, ok := service.GetScopeLoggingConfigurationsOk(); ok {
+ res := make([]interface{}, len(scope_logging_conf))
+ for i, cfg := range scope_logging_conf {
+ d := make(map[string]interface{})
+ if val, ok := cfg.GetScopeOk(); ok {
+ d["scope"] = *val
+ }
+ if val, ok := cfg.GetLogLevelOk(); ok {
+ d["log_level"] = *val
+ }
+ res[i] = d
+ }
+ item["scope_logging_configurations"] = res
+ }
+ return item
+}
+
+func flattenAppDeploymentV2ConfigMASS(service *application_manager_v2.MuleAgentSchedulingService) map[string]interface{} {
+ item := make(map[string]interface{})
+ if val, ok := service.GetApplicationNameOk(); ok {
+ item["application_name"] = *val
+ }
+ if schedulers, ok := service.GetSchedulersOk(); ok {
+ res := make([]interface{}, len(schedulers))
+ for i, scheduler := range schedulers {
+ d := make(map[string]interface{})
+ if val, ok := scheduler.GetNameOk(); ok {
+ d["name"] = *val
+ }
+ if val, ok := scheduler.GetTypeOk(); ok {
+ d["type"] = *val
+ }
+ if val, ok := scheduler.GetFlowNameOk(); ok {
+ d["flow_name"] = *val
+ }
+ if val, ok := scheduler.GetEnabledOk(); ok {
+ d["enabled"] = *val
+ }
+ if val, ok := scheduler.GetTimeUnitOk(); ok {
+ d["time_unit"] = *val
+ }
+ if val, ok := scheduler.GetFrequencyOk(); ok {
+ d["frequency"] = *val
+ }
+ if val, ok := scheduler.GetStartDelayOk(); ok {
+ d["start_delay"] = *val
+ }
+ if val, ok := scheduler.GetExpressionOk(); ok {
+ d["expression"] = *val
+ }
+ if val, ok := scheduler.GetTimeZoneOk(); ok {
+ d["time_zone"] = *val
+ }
+ res[i] = d
+ }
+ item["schedulers"] = res
+ }
+ return item
+}
+
+func flattenAppDeploymentV2TargetDeplSett(deployment_settings *application_manager_v2.DeploymentSettings) map[string]interface{} {
+ item := make(map[string]interface{})
+ if val, ok := deployment_settings.GetClusteredOk(); ok {
+ item["clustered"] = *val
+ }
+ if val, ok := deployment_settings.GetEnforceDeployingReplicasAcrossNodesOk(); ok {
+ item["enforce_deploying_replicas_across_nodes"] = *val
+ }
+ if http, ok := deployment_settings.GetHttpOk(); ok {
+ item["http"] = []interface{}{flattenAppDeploymentV2TargetDeplSettHttp(http)}
+ }
+ if jvm, ok := deployment_settings.GetJvmOk(); ok {
+ if val, ok := jvm.GetArgsOk(); ok {
+ item["jvm_args"] = *val
+ }
+ }
+ if runtime, ok := deployment_settings.GetRuntimeOk(); ok {
+ item["runtime"] = []interface{}{flattenAppDeploymentV2TargetDeplSettRuntime(runtime)}
+ }
+ if autoscaling, ok := deployment_settings.GetAutoscalingOk(); ok {
+ item["autoscaling"] = []interface{}{flattenAppDeploymentV2TargetDeplSettAutoscaling(autoscaling)}
+ }
+ if val, ok := deployment_settings.GetUpdateStrategyOk(); ok {
+ item["update_strategy"] = *val
+ }
+ if resources, ok := deployment_settings.GetResourcesOk(); ok {
+ item["resources"] = []interface{}{flattenAppDeploymentV2TargetDeplSettResources(resources)}
+ }
+ if val, ok := deployment_settings.GetLastMileSecurityOk(); ok {
+ item["last_mile_security"] = *val
+ }
+ if val, ok := deployment_settings.GetDisableAmLogForwardingOk(); ok {
+ item["disable_am_log_forwarding"] = *val
+ }
+ if val, ok := deployment_settings.GetPersistentObjectStoreOk(); ok {
+ item["persistent_object_store"] = *val
+ }
+ if val, ok := deployment_settings.GetAnypointMonitoringScopeOk(); ok {
+ item["anypoint_monitoring_scope"] = *val
+ }
+ if sidecars, ok := deployment_settings.GetSidecarsOk(); ok {
+ item["sidecars"] = []interface{}{flattenAppDeploymentV2TargetDeplSettSidecars(sidecars)}
+ }
+ if val, ok := deployment_settings.GetForwardSslSessionOk(); ok {
+ item["forward_ssl_session"] = *val
+ }
+ if val, ok := deployment_settings.GetDisableExternalLogForwardingOk(); ok {
+ item["disable_external_log_forwarding"] = *val
+ }
+ if val, ok := deployment_settings.GetTracingEnabledOk(); ok {
+ item["tracing_enabled"] = *val
+ }
+ if val, ok := deployment_settings.GetGenerateDefaultPublicUrlOk(); ok {
+ item["generate_default_public_url"] = *val
+ }
+ return item
+}
+
+func flattenAppDeploymentV2TargetDeplSettHttp(http *application_manager_v2.Http) map[string]interface{} {
+ item := make(map[string]interface{})
+ if inbound, ok := http.GetInboundOk(); ok {
+ if val, ok := inbound.GetPublicUrlOk(); ok {
+ item["inbound_public_url"] = *val
+ }
+ if val, ok := inbound.GetPathRewriteOk(); ok {
+ item["inbound_path_rewrite"] = *val
+ }
+ if val, ok := inbound.GetLastMileSecurityOk(); ok {
+ item["inbound_last_mile_security"] = *val
+ }
+ if val, ok := inbound.GetForwardSslSessionOk(); ok {
+ item["inbound_forward_ssl_session"] = *val
+ }
+ if val, ok := inbound.GetInternalUrlOk(); ok {
+ item["inbound_internal_url"] = *val
+ }
+ if val, ok := inbound.GetUniqueIdOk(); ok {
+ item["inbound_unique_id"] = *val
+ }
+ }
+ return item
+}
+
+func flattenAppDeploymentV2TargetDeplSettRuntime(runtime *application_manager_v2.Runtime) map[string]interface{} {
+ item := make(map[string]interface{})
+ if val, ok := runtime.GetVersionOk(); ok {
+ item["version"] = *val
+ }
+ if val, ok := runtime.GetReleaseChannelOk(); ok {
+ item["release_channel"] = *val
+ }
+ if val, ok := runtime.GetJavaOk(); ok {
+ item["java"] = *val
+ }
+ return item
+}
+
+func flattenAppDeploymentV2TargetDeplSettAutoscaling(autoscaling *application_manager_v2.Autoscaling) map[string]interface{} {
+ item := make(map[string]interface{})
+ if val, ok := autoscaling.GetEnabledOk(); ok {
+ item["enabled"] = *val
+ }
+ if val, ok := autoscaling.GetMinReplicasOk(); ok {
+ item["min_replicas"] = *val
+ }
+ if val, ok := autoscaling.GetMaxReplicasOk(); ok {
+ item["max_replicas"] = *val
+ }
+ return item
+}
+
+func flattenAppDeploymentV2TargetDeplSettResources(resources *application_manager_v2.Resources) map[string]interface{} {
+ item := make(map[string]interface{})
+ if cpu, ok := resources.GetCpuOk(); ok {
+ if val, ok := cpu.GetLimitOk(); ok {
+ item["cpu_limit"] = *val
+ }
+ if val, ok := cpu.GetReservedOk(); ok {
+ item["cpu_reserved"] = *val
+ }
+ }
+ if memory, ok := resources.GetMemoryOk(); ok {
+ if val, ok := memory.GetLimitOk(); ok {
+ item["memory_limit"] = *val
+ }
+ if val, ok := memory.GetReservedOk(); ok {
+ item["memory_reserved"] = *val
+ }
+ }
+ if storage, ok := resources.GetStorageOk(); ok {
+ if val, ok := storage.GetLimitOk(); ok {
+ item["storage_limit"] = *val
+ }
+ if val, ok := storage.GetReservedOk(); ok {
+ item["storage_reserved"] = *val
+ }
+ }
+ return item
+}
+
+func flattenAppDeploymentV2TargetDeplSettSidecars(sidecars *application_manager_v2.Sidecars) map[string]interface{} {
+ item := make(map[string]interface{})
+ if anypoint_monitoring, ok := sidecars.GetAnypointMonitoringOk(); ok {
+ if val, ok := anypoint_monitoring.GetImageOk(); ok {
+ item["anypoint_monitoring_image"] = *val
+ }
+ if resources, ok := anypoint_monitoring.GetResourcesOk(); ok {
+ if cpu, ok := resources.GetCpuOk(); ok {
+ if val, ok := cpu.GetLimitOk(); ok {
+ item["anypoint_monitoring_resources_cpu_limit"] = *val
+ }
+ if val, ok := cpu.GetReservedOk(); ok {
+ item["anypoint_monitoring_resources_cpu_reserved"] = *val
+ }
+ }
+ if memory, ok := resources.GetMemoryOk(); ok {
+ if val, ok := memory.GetLimitOk(); ok {
+ item["anypoint_monitoring_resources_memory_limit"] = *val
+ }
+ if val, ok := memory.GetReservedOk(); ok {
+ item["anypoint_monitoring_resources_memory_reserved"] = *val
+ }
+ }
+ }
+ }
+ return item
+}
+
+func flattenAppDeploymentV2Integrations(integrations *application_manager_v2.ApplicationIntegrations) map[string]interface{} {
+ item := make(map[string]interface{})
+ if services, ok := integrations.GetServicesOk(); ok {
+ if object_store_v2, ok := services.GetObjectStoreV2Ok(); ok {
+ item["object_store_v2_enabled"] = object_store_v2.GetEnabled()
+ }
+ }
+ return item
+}
+
+// Set Attributes
+func setAppDeploymentV2AttributesToResourceData(d *schema.ResourceData, data map[string]interface{}) error {
+ attributes := getAppDeploymentV2Attributes()
+ if data != nil {
+ for _, attr := range attributes {
+ if val, ok := data[attr]; ok {
+ if err := d.Set(attr, val); err != nil {
+ return fmt.Errorf("unable to set app deployment attribute %s\n\tdetails: %s", attr, err)
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func getAppDeploymentV2Attributes() []string {
+ attributes := [...]string{
+ "name", "creation_date", "last_modified_date", "desired_version",
+ "replicas", "status", "application", "target", "last_successful_version",
+ }
+ return attributes[:]
+}
+
+/*
+ * Returns authentication context (includes authorization header)
+ */
+func getAppDeploymentV2AuthCtx(ctx context.Context, pco *ProviderConfOutput) context.Context {
+ tmp := context.WithValue(ctx, application_manager_v2.ContextAccessToken, pco.access_token)
+ return context.WithValue(tmp, application_manager_v2.ContextServerIndex, pco.server_index)
+}
diff --git a/anypoint/data_source_app_deployments_v2.go b/anypoint/data_source_app_deployments_v2.go
new file mode 100644
index 0000000..d7284cc
--- /dev/null
+++ b/anypoint/data_source_app_deployments_v2.go
@@ -0,0 +1,282 @@
+package anypoint
+
+import (
+ "context"
+ "io"
+ "strconv"
+ "time"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ "github.com/mulesoft-anypoint/anypoint-client-go/application_manager_v2"
+)
+
// dataSourceAppDeploymentsV2 defines the terraform data source that lists
// mule app deployments for a given organization and environment.
// Only Cloudhub V2 and Runtime Fabrics deployments are returned; results can
// be filtered via the optional "params" block (target_id, offset, limit).
func dataSourceAppDeploymentsV2() *schema.Resource {
	return &schema.Resource{
		ReadContext: dataSourceAppDeploymentsV2Read,
		Description: `
		Reads ` + "`" + `Deployments` + "`" + ` from the runtime manager for a given organization and environment.
		This only works for Cloudhub V2 and Runtime Fabrics Apps.
		`,
		Schema: map[string]*schema.Schema{
			"org_id": {
				Type:        schema.TypeString,
				Required:    true,
				Description: "The organization where to query deployments.",
			},
			"env_id": {
				Type:        schema.TypeString,
				Required:    true,
				Description: "The environment id where to get deployments from",
			},
			// Optional single-occurrence search-parameter block; parsed by
			// parseAppDeploymentSearchOpts before the API call.
			"params": {
				Type:        schema.TypeSet,
				Optional:    true,
				Description: "The search parameters. Should only provide one occurrence of the block.",
				MaxItems:    1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"target_id": {
							Type:        schema.TypeString,
							Optional:    true,
							Description: "The id of the target the deployments are deployed to.",
						},
						"offset": {
							Type:        schema.TypeInt,
							Optional:    true,
							Default:     0,
							Description: "Skip over a number of elements by specifying an offset value for the query.",
						},
						"limit": {
							Type:        schema.TypeInt,
							Optional:    true,
							Default:     25,
							Description: "Limit the number of elements in the response.",
						},
					},
				},
			},
			// Computed list of deployments, populated by
			// flattenAppDeploymentV2ItemsResult.
			"deployments": {
				Type:        schema.TypeList,
				Description: "The result of the query with the list of all deployments.",
				Computed:    true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"id": {
							Type:        schema.TypeString,
							Description: "The id of the mule app deployment",
							Computed:    true,
						},
						"name": {
							Type:        schema.TypeString,
							Computed:    true,
							Description: "The name of the deployed mule app.",
						},
						"creation_date": {
							Type:        schema.TypeInt,
							Computed:    true,
							Description: "The creation date of the mule app.",
						},
						"last_modified_date": {
							Type:        schema.TypeInt,
							Computed:    true,
							Description: "The last modification date of the mule app.",
						},
						"target_provider": {
							Type:        schema.TypeString,
							Computed:    true,
							Description: "The cloud provider the target belongs to.",
						},
						"target_id": {
							Type:        schema.TypeString,
							Computed:    true,
							Description: "The target id",
						},
						"status": {
							Type:     schema.TypeString,
							Computed: true,
							Description: `Status of the mule app, which may be one of:
	- PARTIALLY_STARTED
	- DEPLOYMENT_FAILED
	- STARTING
	- STARTED
	- STOPPING
	- STOPPED
	- UNDEPLOYING
	- UNDEPLOYED
	- UPDATED
	- APPLIED
	- APPLYING
	- FAILED
	- DELETED
	`,
						},
						"application_status": {
							Type:        schema.TypeString,
							Computed:    true,
							Description: "More simplistic status that can be either RUNNING or NOT_RUNNING",
							// ValidateDiagFunc: validation.ToDiagFunc(
							// 	validation.StringInSlice(
							// 		[]string{"RUNNING", "NOT_RUNNING"},
							// 		false,
							// 	),
							// ),
						},
						"current_runtime_version": {
							Type:        schema.TypeString,
							Description: "The mule app's runtime version",
							Computed:    true,
						},
						"last_successful_runtime_version": {
							Type:        schema.TypeString,
							Description: "The last successful runtime version",
							Computed:    true,
						},
					},
				},
			},
			"total": {
				Type:        schema.TypeInt,
				Description: "The total number of available results",
				Computed:    true,
			},
		},
	}
}
+
// dataSourceAppDeploymentsV2Read queries the application manager API for
// deployments in the given org/env, applies the optional search params, and
// stores the flattened list plus the total count in the data source state.
// The data source id is a timestamp, so every plan re-reads the list.
func dataSourceAppDeploymentsV2Read(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	var diags diag.Diagnostics
	pco := m.(ProviderConfOutput)
	searchOpts := d.Get("params").(*schema.Set)
	orgid := d.Get("org_id").(string)
	envid := d.Get("env_id").(string)
	authctx := getAppDeploymentV2AuthCtx(ctx, &pco)
	//prepare request
	req := pco.appmanagerclient.DefaultApi.GetAllDeployments(authctx, orgid, envid)
	req, errDiags := parseAppDeploymentSearchOpts(req, searchOpts)
	if errDiags.HasError() {
		diags = append(diags, errDiags...)
		return diags
	}
	//execute request
	res, httpr, err := req.Execute()
	if err != nil {
		var details string
		// prefer the API error body when available, otherwise the client error
		if httpr != nil && httpr.StatusCode >= 400 {
			defer httpr.Body.Close()
			b, _ := io.ReadAll(httpr.Body)
			details = string(b)
		} else {
			details = err.Error()
		}
		diags = append(diags, diag.Diagnostic{
			Severity: diag.Error,
			Summary:  "Unable to get deployments for org " + orgid + " and env " + envid,
			Detail:   details,
		})
		return diags
	}
	defer httpr.Body.Close()
	//process data
	deployments := flattenAppDeploymentV2ItemsResult(res.GetItems())
	if err := d.Set("deployments", deployments); err != nil {
		diags = append(diags, diag.Diagnostic{
			Severity: diag.Error,
			Summary:  "Unable to set deployment items for org " + orgid + " and env " + envid,
			Detail:   err.Error(),
		})
		return diags
	}
	if err := d.Set("total", res.GetTotal()); err != nil {
		diags = append(diags, diag.Diagnostic{
			Severity: diag.Error,
			Summary:  "Unable to set total number of deployment items for org " + orgid + " and env " + envid,
			Detail:   err.Error(),
		})
		return diags
	}
	d.SetId(strconv.FormatInt(time.Now().Unix(), 10))

	return diags
}
+
+/*
+Parses the api manager search options in order to check if the required search parameters are set correctly.
+Appends the parameters to the given request
+*/
+func parseAppDeploymentSearchOpts(req application_manager_v2.DefaultApiGetAllDeploymentsRequest, params *schema.Set) (application_manager_v2.DefaultApiGetAllDeploymentsRequest, diag.Diagnostics) {
+ var diags diag.Diagnostics
+ if params.Len() == 0 {
+ return req, diags
+ }
+ opts := params.List()[0]
+ for k, v := range opts.(map[string]interface{}) {
+ if k == "target_id" {
+ req = req.TargetId(v.(string))
+ continue
+ }
+ if k == "offset" {
+ req = req.Offset(int32(v.(int)))
+ continue
+ }
+ if k == "limit" {
+ req = req.Limit(int32(v.(int)))
+ continue
+ }
+ }
+ return req, diags
+}
+
+func flattenAppDeploymentV2ItemsResult(items []application_manager_v2.DeploymentItem) []interface{} {
+ if len(items) > 0 {
+ res := make([]interface{}, len(items))
+ for i, item := range items {
+ res[i] = flattenAppDeploymentV2ItemResult(&item)
+ }
+ return res
+ }
+ return make([]interface{}, 0)
+}
+
// flattenAppDeploymentV2ItemResult maps a single deployment item into a flat
// map matching the "deployments" element schema. Nested target and application
// fields are lifted to top-level keys. Returns an empty map for nil input.
func flattenAppDeploymentV2ItemResult(data *application_manager_v2.DeploymentItem) map[string]interface{} {
	item := make(map[string]interface{})
	if data == nil {
		return item
	}
	if val, ok := data.GetIdOk(); ok {
		item["id"] = *val
	}
	if val, ok := data.GetNameOk(); ok {
		item["name"] = *val
	}
	if val, ok := data.GetCreationDateOk(); ok {
		item["creation_date"] = *val
	}
	if val, ok := data.GetLastModifiedDateOk(); ok {
		item["last_modified_date"] = *val
	}
	if target, ok := data.GetTargetOk(); ok {
		if val, ok := target.GetProviderOk(); ok {
			item["target_provider"] = *val
		}
		if val, ok := target.GetTargetIdOk(); ok {
			item["target_id"] = *val
		}
	}
	if val, ok := data.GetStatusOk(); ok {
		item["status"] = *val
	}
	if app, ok := data.GetApplicationOk(); ok {
		if val, ok := app.GetStatusOk(); ok {
			item["application_status"] = *val
		}
	}
	if val, ok := data.GetCurrentRuntimeVersionOk(); ok {
		item["current_runtime_version"] = *val
	}
	if val, ok := data.GetLastSuccessfulRuntimeVersionOk(); ok {
		item["last_successful_runtime_version"] = *val
	}

	return item
}
diff --git a/anypoint/data_source_connected_app.go b/anypoint/data_source_connected_app.go
index 7f505e1..d8ed93f 100644
--- a/anypoint/data_source_connected_app.go
+++ b/anypoint/data_source_connected_app.go
@@ -163,7 +163,7 @@ func dataSourceConnectedAppRead(ctx context.Context, d *schema.ResourceData, m i
// Is it a "on behalf of user" connected apps?
if granttypes := connappinstance["grant_types"]; granttypes != nil && StringInSlice(granttypes.([]string), "client_credentials", true) {
// Yes, then load the scopes using connapps/{connapp_id}/scopes
- if scopes, error := readScopesByConnectedAppId(authctx, orgid, connappid, m); error != nil {
+ if scopes, err := readScopesByConnectedAppId(authctx, orgid, connappid, m); err != nil {
diags := append(diags, diag.Diagnostic{
Severity: diag.Error,
Summary: "Unable to read connected-app " + connappid + " scopes",
diff --git a/anypoint/data_source_fabrics.go b/anypoint/data_source_fabrics.go
new file mode 100644
index 0000000..b178b46
--- /dev/null
+++ b/anypoint/data_source_fabrics.go
@@ -0,0 +1,544 @@
+package anypoint
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ rtf "github.com/mulesoft-anypoint/anypoint-client-go/rtf"
+)
+
// NodeCapacityDefinition describes a fabrics node capacity report
// (cpu/memory/pods). Reused for total, allocated-request and allocated-limit
// capacities in NodeDefinition.
var NodeCapacityDefinition = &schema.Resource{
	Schema: map[string]*schema.Schema{
		"cpu": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"cpu_millis": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"memory": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"memory_mi": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"pods": {
			Type:     schema.TypeInt,
			Computed: true,
		},
	},
}
+
// NodeStatusDefinition describes the health/readiness/schedulability flags of
// a single fabrics cluster node.
var NodeStatusDefinition = &schema.Resource{
	Schema: map[string]*schema.Schema{
		"is_healthy": {
			Type:     schema.TypeBool,
			Computed: true,
		},
		"is_ready": {
			Type:     schema.TypeBool,
			Computed: true,
		},
		"is_schedulable": {
			Type:     schema.TypeBool,
			Computed: true,
		},
	},
}
+
// NodeDefinition describes a single node of a Runtime Fabrics cluster,
// including its status and capacity sub-blocks (see NodeStatusDefinition and
// NodeCapacityDefinition).
var NodeDefinition = &schema.Resource{
	Schema: map[string]*schema.Schema{
		"uid": {
			Type:        schema.TypeString,
			Description: "The node id",
			Computed:    true,
		},
		"name": {
			Type:        schema.TypeString,
			Description: "The node name",
			Computed:    true,
		},
		"kubelet_version": {
			Type:        schema.TypeString,
			Description: "The kubelet version of the node",
			Computed:    true,
		},
		"docker_version": {
			Type:        schema.TypeString,
			Description: "The docker version",
			Computed:    true,
		},
		"role": {
			Type:        schema.TypeString,
			Description: "The role of the node in the cluster",
			Computed:    true,
		},
		"status": {
			Type:        schema.TypeList,
			Description: "The status of the node",
			Computed:    true,
			Elem:        NodeStatusDefinition,
		},
		"capacity": {
			Type:        schema.TypeList,
			Description: "The capacity of the node",
			Computed:    true,
			Elem:        NodeCapacityDefinition,
		},
		"allocated_request_capacity": {
			Type:        schema.TypeList,
			Description: "The allocated request capacity of the node",
			Computed:    true,
			Elem:        NodeCapacityDefinition,
		},
		"allocated_limit_capacity": {
			Type:        schema.TypeList,
			Description: "The allocated limit capacity of the node",
			Computed:    true,
			Elem:        NodeCapacityDefinition,
		},
	},
}
+
+var FabricsFeaturesDefinition = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "enhanced_security": {
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Whether enhanced security feature is active",
+ },
+ "persistent_store": {
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Whether peristent store feature is active",
+ },
+ },
+}
+
+var FabricsIngressDomainsDefinition = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "domains": {
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "The list of domains.",
+ Elem: schema.TypeString,
+ },
+ },
+}
+
// FabricsUpgradeDefinition describes the upgrade status block of a fabrics
// instance (only present while an upgrade is in flight).
var FabricsUpgradeDefinition = &schema.Resource{
	Schema: map[string]*schema.Schema{
		"status": {
			Type:        schema.TypeString,
			Computed:    true,
			Description: "The upgrade status.",
		},
	},
}
+
+func dataSourceFabrics() *schema.Resource {
+ return &schema.Resource{
+ ReadContext: dataSourceFabricsRead,
+ Description: `
+ Reads a specific ` + "`" + `Runtime Fabrics'` + "`" + ` instance.
+ `,
+ Schema: map[string]*schema.Schema{
+ "id": {
+ Type: schema.TypeString,
+ Required: true,
+ Description: "The unique id of the fabrics instance in the platform.",
+ },
+ "org_id": {
+ Type: schema.TypeString,
+ Required: true,
+ Description: "The organization id where the fabrics is hosted.",
+ },
+ "name": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The name of this fabrics instance.",
+ },
+ "region": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The region where fabrics instance is hosted.",
+ },
+ "vendor": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The vendor name of the kubernetes instance hosting fabrics.",
+ },
+ "vendor_metadata": {
+ Type: schema.TypeMap,
+ Computed: true,
+ Description: "The vendor metadata",
+ },
+ "version": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The version of fabrics.",
+ },
+ "status": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The status of the farbics instance.",
+ },
+ "desired_version": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The desired version of fabrics.",
+ },
+ "available_upgrade_version": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The available upgrade version of fabrics.",
+ },
+ "created_at": {
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "The creation date of the fabrics instance.",
+ },
+ "upgrade": {
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "The status of the fabrics. Only available when instance is created and not activated yet.",
+ Elem: FabricsUpgradeDefinition,
+ },
+ "nodes": {
+ Type: schema.TypeList,
+ Computed: true,
+ Elem: NodeDefinition,
+ Description: "The list of fabrics nodes.",
+ },
+ "activation_data": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The activation data to use during installation of fabrics on the kubernetes cluster. Only available when instance is created and not activated yet.",
+ },
+ "seconds_since_heartbeat": {
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "The number of seconds since last heartbeat.",
+ },
+ "kubernetes_version": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The kubernetes version of the cluster.",
+ },
+ "namespace": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The namespace where runtime fabrics is installed.",
+ },
+ "license_expiry_date": {
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "The expiry date of the license (timestamp).",
+ },
+ "is_managed": {
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Whether this cluster is managed.",
+ },
+ "is_helm_managed": {
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Whether this cluster is managed by helmet.",
+ },
+ "app_scoped_log_forwarding": {
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Whether app scoped log forwarding is active.",
+ },
+ "cluster_configuration_level": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The configuration level of the cluster (production or development).",
+ },
+ "features": {
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "The features of this cluster.",
+ Elem: FabricsFeaturesDefinition,
+ },
+ "ingress": {
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "The ingress configurations of this cluster.",
+ Elem: FabricsIngressDomainsDefinition,
+ },
+ },
+ }
+}
+
+func dataSourceFabricsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+ var diags diag.Diagnostics
+ pco := m.(ProviderConfOutput)
+ id := d.Get("id").(string)
+ orgid := d.Get("org_id").(string)
+ authctx := getFabricsAuthCtx(ctx, &pco)
+ //perform request
+ res, httpr, err := pco.rtfclient.DefaultApi.GetFabrics(authctx, orgid, id).Execute()
+ if err != nil {
+ var details string
+ if httpr != nil && httpr.StatusCode >= 400 {
+ defer httpr.Body.Close()
+ b, _ := io.ReadAll(httpr.Body)
+ details = string(b)
+ } else {
+ details = err.Error()
+ }
+ diags = append(diags, diag.Diagnostic{
+ Severity: diag.Error,
+ Summary: "Unable to get fabrics " + id,
+ Detail: details,
+ })
+ return diags
+ }
+ defer httpr.Body.Close()
+ //process data
+ data := flattenFabricsData(res)
+ if err := setFabricsResourceData(d, data); err != nil {
+ diags := append(diags, diag.Diagnostic{
+ Severity: diag.Error,
+ Summary: "Unable to set fabrics " + id + " attributes",
+ Detail: err.Error(),
+ })
+ return diags
+ }
+ d.SetId(id)
+
+ return diags
+}
+
// flattenFabricsData maps a fabrics API model into a flat map keyed by the
// data source attribute names (see getFabricsAttributes). Optional fields are
// only set when present in the payload; "upgrade" always gets a value so the
// state list is cleared when no upgrade is in flight.
func flattenFabricsData(fabrics *rtf.Fabrics) map[string]interface{} {
	mappedItem := make(map[string]interface{})

	mappedItem["id"] = fabrics.GetId()
	mappedItem["name"] = fabrics.GetName()
	mappedItem["region"] = fabrics.GetRegion()
	mappedItem["vendor"] = fabrics.GetVendor()
	if val, ok := fabrics.GetVendorMetadataOk(); ok {
		mappedItem["vendor_metadata"] = val
	}
	if val, ok := fabrics.GetOrganizationIdOk(); ok {
		mappedItem["org_id"] = *val
	}
	if val, ok := fabrics.GetVersionOk(); ok {
		mappedItem["version"] = *val
	}
	if val, ok := fabrics.GetStatusOk(); ok {
		mappedItem["status"] = *val
	}
	if val, ok := fabrics.GetDesiredVersionOk(); ok {
		mappedItem["desired_version"] = *val
	}
	if val, ok := fabrics.GetAvailableUpgradeVersionOk(); ok {
		mappedItem["available_upgrade_version"] = *val
	}
	if val, ok := fabrics.GetCreatedAtOk(); ok {
		mappedItem["created_at"] = *val
	}
	if val, ok := fabrics.GetUpgradeOk(); ok {
		mappedItem["upgrade"] = flattenFabricsUpgradeData(val)
	} else {
		// explicit empty list so a finished upgrade clears the state
		mappedItem["upgrade"] = []interface{}{}
	}
	if val, ok := fabrics.GetNodesOk(); ok {
		mappedItem["nodes"] = flattenFabricsNodesData(val)
	}
	if val, ok := fabrics.GetActivationDataOk(); ok {
		mappedItem["activation_data"] = *val
	}
	if val, ok := fabrics.GetSecondsSinceHeartbeatOk(); ok {
		mappedItem["seconds_since_heartbeat"] = *val
	}
	if val, ok := fabrics.GetKubernetesVersionOk(); ok {
		mappedItem["kubernetes_version"] = *val
	}
	if val, ok := fabrics.GetNamespaceOk(); ok {
		mappedItem["namespace"] = *val
	}
	if val, ok := fabrics.GetLicenseExpiryDateOk(); ok {
		mappedItem["license_expiry_date"] = *val
	}
	if val, ok := fabrics.GetIsManagedOk(); ok {
		mappedItem["is_managed"] = *val
	}
	if val, ok := fabrics.GetIsHelmManagedOk(); ok {
		mappedItem["is_helm_managed"] = *val
	}
	if val, ok := fabrics.GetAppScopedLogForwardingOk(); ok {
		mappedItem["app_scoped_log_forwarding"] = *val
	}
	if val, ok := fabrics.GetClusterConfigurationLevelOk(); ok {
		mappedItem["cluster_configuration_level"] = *val
	}
	if val, ok := fabrics.GetFeaturesOk(); ok {
		mappedItem["features"] = flattenFabricsFeaturesData(val)
	}
	if val, ok := fabrics.GetIngressOk(); ok {
		mappedItem["ingress"] = flattenFabricsIngressData(val)
	}

	return mappedItem
}
+
+func flattenFabricsUpgradeData(upgrade *rtf.FabricsUpgrade) []interface{} {
+ if upgrade == nil {
+ return []interface{}{}
+ }
+ data := make(map[string]interface{})
+ data["status"] = upgrade.GetStatus()
+
+ return []interface{}{data}
+}
+
+func flattenFabricsFeaturesData(features *rtf.Features) []interface{} {
+ if features == nil {
+ return []interface{}{}
+ }
+ data := make(map[string]interface{})
+ data["enhanced_security"] = features.GetEnhancedSecurity()
+ data["persistent_store"] = features.GetPersistentStore()
+
+ return []interface{}{data}
+}
+
+func flattenFabricsIngressData(ingress *rtf.Ingress) []interface{} {
+ if ingress == nil {
+ return []interface{}{}
+ }
+ data := make(map[string]interface{})
+ data["domains"] = ingress.GetDomains()
+
+ return []interface{}{data}
+}
+
+func flattenFabricsNodesData(nodes []rtf.FabricsNode) []interface{} {
+ if len(nodes) == 0 {
+ return make([]interface{}, 0)
+ }
+
+ res := make([]interface{}, len(nodes))
+ for i, node := range nodes {
+ item := make(map[string]interface{})
+
+ if val, ok := node.GetUidOk(); ok {
+ item["uid"] = *val
+ }
+ if val, ok := node.GetNameOk(); ok {
+ item["name"] = *val
+ }
+ if val, ok := node.GetKubeletVersionOk(); ok {
+ item["kubelet_version"] = *val
+ }
+ if val, ok := node.GetDockerVersionOk(); ok {
+ item["docker_version"] = *val
+ }
+ if val, ok := node.GetRoleOk(); ok {
+ item["role"] = *val
+ }
+ if val, ok := node.GetStatusOk(); ok {
+ item["status"] = flattenFabricsNodeStatusData(val)
+ }
+ if val, ok := node.GetCapacityOk(); ok {
+ item["capacity"] = flattenFabricsNodeCapacityData(val)
+ }
+ if val, ok := node.GetAllocatedRequestCapacityOk(); ok {
+ item["allocated_request_capacity"] = flattenFabricsNodeAllocReqCapacityData(val)
+ }
+ if val, ok := node.GetAllocatedLimitCapacityOk(); ok {
+ item["allocated_limit_capacity"] = flattenFabricsNodeAllocLimitCapacityData(val)
+ }
+
+ res[i] = item
+ }
+
+ return res
+}
+
+func flattenFabricsNodeStatusData(status *rtf.Status) []interface{} {
+ if status == nil {
+ return []interface{}{}
+ }
+ data := make(map[string]interface{})
+ data["is_healthy"] = status.GetIsHealthy()
+ data["is_ready"] = status.GetIsReady()
+ data["is_schedulable"] = status.GetIsSchedulable()
+
+ return []interface{}{data}
+}
+
+func flattenFabricsNodeAllocReqCapacityData(capacity *rtf.AllocatedRequestCapacity) []interface{} {
+ if capacity == nil {
+ return []interface{}{}
+ }
+ data := make(map[string]interface{})
+ data["cpu"] = capacity.GetCpu()
+ data["cpu_millis"] = capacity.GetCpuMillis()
+ data["memory"] = capacity.GetMemory()
+ data["memory_mi"] = capacity.GetMemoryMi()
+ data["pods"] = capacity.GetPods()
+
+ return []interface{}{data}
+}
+
+func flattenFabricsNodeAllocLimitCapacityData(capacity *rtf.AllocatedLimitCapacity) []interface{} {
+ if capacity == nil {
+ return []interface{}{}
+ }
+ data := make(map[string]interface{})
+ data["cpu"] = capacity.GetCpu()
+ data["cpu_millis"] = capacity.GetCpuMillis()
+ data["memory"] = capacity.GetMemory()
+ data["memory_mi"] = capacity.GetMemoryMi()
+ data["pods"] = capacity.GetPods()
+
+ return []interface{}{data}
+}
+
+func flattenFabricsNodeCapacityData(capacity *rtf.Capacity) []interface{} {
+ if capacity == nil {
+ return []interface{}{}
+ }
+ data := make(map[string]interface{})
+ data["cpu"] = capacity.GetCpu()
+ data["cpu_millis"] = capacity.GetCpuMillis()
+ data["memory"] = capacity.GetMemory()
+ data["memory_mi"] = capacity.GetMemoryMi()
+ data["pods"] = capacity.GetPods()
+
+ return []interface{}{data}
+}
+
+func setFabricsResourceData(d *schema.ResourceData, data map[string]interface{}) error {
+ attributes := getFabricsAttributes()
+ if data != nil {
+ for _, attr := range attributes {
+ if val, ok := data[attr]; ok {
+ if err := d.Set(attr, val); err != nil {
+ return fmt.Errorf("unable to set fabrics attribute %s\n\tdetails: %s", attr, err)
+ }
+ }
+ }
+ }
+ return nil
+}
+
// getFabricsAttributes returns the list of schema attribute keys that are
// read back into terraform state for a fabrics instance.
func getFabricsAttributes() []string {
	return []string{
		"name", "region", "vendor", "vendor_metadata", "version",
		"status", "desired_version", "available_upgrade_version", "created_at",
		"upgrade", "nodes", "activation_data", "seconds_since_heartbeat", "kubernetes_version",
		"namespace", "license_expiry_date", "is_managed", "is_helm_managed", "app_scoped_log_forwarding",
		"cluster_configuration_level", "features", "ingress",
	}
}
diff --git a/anypoint/data_source_fabrics_associations.go b/anypoint/data_source_fabrics_associations.go
new file mode 100644
index 0000000..32c5b68
--- /dev/null
+++ b/anypoint/data_source_fabrics_associations.go
@@ -0,0 +1,139 @@
+package anypoint
+
+import (
+ "context"
+ "io"
+ "strconv"
+ "time"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ rtf "github.com/mulesoft-anypoint/anypoint-client-go/rtf"
+)
+
// dataSourceFabricsAssociations defines the terraform data source that lists
// the org/environment associations of a given Runtime Fabrics instance.
func dataSourceFabricsAssociations() *schema.Resource {
	return &schema.Resource{
		ReadContext: dataSourceFabricsAssociationsRead,
		Description: `
		Reads all ` + "`" + `Runtime Fabrics'` + "`" + ` available in your org.
		`,
		Schema: map[string]*schema.Schema{
			"org_id": {
				Type:        schema.TypeString,
				Description: "The business group id",
				Required:    true,
			},
			"fabrics_id": {
				Type:        schema.TypeString,
				Description: "The runtime fabrics id",
				Required:    true,
			},
			// Computed list, populated by flattenFabricsAssociationsData.
			"associations": {
				Type:     schema.TypeList,
				Computed: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"id": {
							Type:        schema.TypeString,
							Computed:    true,
							Description: "The unique id of the fabrics instance in the platform.",
						},
						"org_id": {
							Type:        schema.TypeString,
							Computed:    true,
							Description: "The organization id associated with fabrics.",
						},
						"env_id": {
							Type:        schema.TypeString,
							Computed:    true,
							Description: "The environment associated with fabrics.",
						},
					},
				},
			},
			"total": {
				Type:        schema.TypeInt,
				Description: "The total number of available results",
				Computed:    true,
			},
		},
	}
}
+
// dataSourceFabricsAssociationsRead fetches the env/org associations of a
// fabrics instance and stores the flattened list plus its length ("total") in
// the data source state. The data source id is a timestamp, so every plan
// re-reads the list.
func dataSourceFabricsAssociationsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	var diags diag.Diagnostics
	pco := m.(ProviderConfOutput)
	orgid := d.Get("org_id").(string)
	fabricsId := d.Get("fabrics_id").(string)
	authctx := getFabricsAuthCtx(ctx, &pco)
	//perform request
	res, httpr, err := pco.rtfclient.DefaultApi.GetFabricsAssociations(authctx, orgid, fabricsId).Execute()
	if err != nil {
		var details string
		// prefer the API error body when available, otherwise the client error
		if httpr != nil && httpr.StatusCode >= 400 {
			defer httpr.Body.Close()
			b, _ := io.ReadAll(httpr.Body)
			details = string(b)
		} else {
			details = err.Error()
		}
		diags = append(diags, diag.Diagnostic{
			Severity: diag.Error,
			Summary:  "Unable to get fabrics associations",
			Detail:   details,
		})
		return diags
	}
	defer httpr.Body.Close()
	//process data
	list := flattenFabricsAssociationsData(res)
	//save in data source schema
	if err := d.Set("associations", list); err != nil {
		diags = append(diags, diag.Diagnostic{
			Severity: diag.Error,
			Summary:  "Unable to set fabrics associations",
			Detail:   err.Error(),
		})
		return diags
	}

	if err := d.Set("total", len(list)); err != nil {
		diags = append(diags, diag.Diagnostic{
			Severity: diag.Error,
			Summary:  "Unable to set total number fabrics associations",
			Detail:   err.Error(),
		})
		return diags
	}

	d.SetId(strconv.FormatInt(time.Now().Unix(), 10))

	return diags
}
+
+func flattenFabricsAssociationsData(associations []rtf.FabricsAssociationsInner) []interface{} {
+ if len(associations) == 0 {
+ return make([]interface{}, 0)
+ }
+ res := make([]interface{}, len(associations))
+ for i, association := range associations {
+ res[i] = flattenFabricsAssociationData(&association)
+ }
+ return res
+}
+
+func flattenFabricsAssociationData(association *rtf.FabricsAssociationsInner) map[string]interface{} {
+ mappedItem := make(map[string]interface{})
+
+ if val, ok := association.GetIdOk(); ok {
+ mappedItem["id"] = *val
+ }
+ if val, ok := association.GetOrganizationIdOk(); ok {
+ mappedItem["org_id"] = *val
+ }
+ if val, ok := association.GetEnvironmentIdOk(); ok {
+ mappedItem["env_id"] = *val
+ }
+
+ return mappedItem
+}
diff --git a/anypoint/data_source_fabrics_health.go b/anypoint/data_source_fabrics_health.go
new file mode 100644
index 0000000..033b26b
--- /dev/null
+++ b/anypoint/data_source_fabrics_health.go
@@ -0,0 +1,269 @@
+package anypoint
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "strconv"
+ "time"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ "github.com/mulesoft-anypoint/anypoint-client-go/rtf"
+)
+
// FabricsHealthStatusDefinition describes a single health-check block of a
// fabrics instance (shared by all the health categories in
// dataSourceFabricsHealth).
var FabricsHealthStatusDefinition = &schema.Resource{
	Schema: map[string]*schema.Schema{
		"healthy": {
			Type:        schema.TypeBool,
			Computed:    true,
			Description: "True if the component is healthy",
		},
		"updated_at": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"probes": {
			Type:        schema.TypeString,
			Computed:    true,
			Description: "Probes collected for this health check. Only applicable for Appliance probes.",
		},
		"failed_probes": {
			Type:        schema.TypeList,
			Computed:    true,
			Description: "Probe failures attributing to the result of this health check.",
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"name": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"reason": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"last_transition_at": {
						Type:     schema.TypeInt,
						Computed: true,
					},
				},
			},
		},
	},
}
+
// dataSourceFabricsHealth defines the terraform data source that reads the
// health and monitoring metrics of a Runtime Fabrics instance. Each category
// is a single-element list using FabricsHealthStatusDefinition.
func dataSourceFabricsHealth() *schema.Resource {
	return &schema.Resource{
		ReadContext: dataSourceFabricsHealthRead,
		Description: `
		Reads ` + "`" + `Runtime Fabrics'` + "`" + ` health and monitoring metrics.
		`,
		Schema: map[string]*schema.Schema{
			"org_id": {
				Type:        schema.TypeString,
				Description: "The business group id",
				Required:    true,
			},
			"fabrics_id": {
				Type:        schema.TypeString,
				Description: "The runtime fabrics id",
				Required:    true,
			},
			"cluster_monitoring": {
				Type:        schema.TypeList,
				Description: "The ability to monitor and report the status of the Runtime Fabric cluster.",
				Computed:    true,
				Elem:        FabricsHealthStatusDefinition,
			},
			"manage_deployments": {
				Type:        schema.TypeList,
				Description: "The ability to create, update, or delete application deployments in this Runtime Fabric.",
				Computed:    true,
				Elem:        FabricsHealthStatusDefinition,
			},
			"load_balancing": {
				Type:        schema.TypeList,
				Description: "The ability to accept inbound requests and load-balance across different replicas of application instances.",
				Computed:    true,
				Elem:        FabricsHealthStatusDefinition,
			},
			"anypoint_monitoring": {
				Type:        schema.TypeList,
				Description: "The ability to see metrics and logs in Anypoint Monitoring.",
				Computed:    true,
				Elem:        FabricsHealthStatusDefinition,
			},
			"external_log_forwarding": {
				Type:        schema.TypeList,
				Description: "The ability to forward application logs to an external provider.",
				Computed:    true,
				Elem:        FabricsHealthStatusDefinition,
			},
			"appliance": {
				Type:        schema.TypeList,
				Description: "Detailed status of the appliance, when applicable.",
				Computed:    true,
				Elem:        FabricsHealthStatusDefinition,
			},
			"infrastructure": {
				Type:        schema.TypeList,
				Description: "Detailed status of the infrastructure supporting the Runtime Fabric cluster.",
				Computed:    true,
				Elem:        FabricsHealthStatusDefinition,
			},
			"persistent_gateway": {
				Type:        schema.TypeList,
				Description: "Detailed status of the persistent gateway for Runtime Fabric cluster.",
				Computed:    true,
				Elem:        FabricsHealthStatusDefinition,
			},
		},
	}
}
+
+func dataSourceFabricsHealthRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+ var diags diag.Diagnostics
+ pco := m.(ProviderConfOutput)
+ orgid := d.Get("org_id").(string)
+ fabricsid := d.Get("fabrics_id").(string)
+ authctx := getFabricsAuthCtx(ctx, &pco)
+ //perform request
+ res, httpr, err := pco.rtfclient.DefaultApi.GetFabricsHealth(authctx, orgid, fabricsid).Execute()
+ if err != nil {
+ var details string
+ if httpr != nil && httpr.StatusCode >= 400 {
+ defer httpr.Body.Close()
+ b, _ := io.ReadAll(httpr.Body)
+ details = string(b)
+ } else {
+ details = err.Error()
+ }
+ diags = append(diags, diag.Diagnostic{
+ Severity: diag.Error,
+ Summary: "Unable to get fabrics health metrics",
+ Detail: details,
+ })
+ return diags
+ }
+ defer httpr.Body.Close()
+ //process data
+ data := flattenFabricsHealthData(res)
+ //save in data source schema
+ if err := setFabricsHealthResourceData(d, data); err != nil {
+ diags := append(diags, diag.Diagnostic{
+ Severity: diag.Error,
+ Summary: "Unable to set fabrics health attributes",
+ Detail: err.Error(),
+ })
+ return diags
+ }
+
+ d.SetId(strconv.FormatInt(time.Now().Unix(), 10))
+
+ return diags
+}
+
+func flattenFabricsHealthData(data *rtf.FabricsHealth) map[string]interface{} {
+ mappedItem := make(map[string]interface{})
+
+ if val, ok := data.GetClusterMonitoringOk(); ok {
+ mappedItem["cluster_monitoring"] = []interface{}{flattenFabricsHealthStatusData(val)}
+ } else {
+ mappedItem["cluster_monitoring"] = []interface{}{}
+ }
+ if val, ok := data.GetManageDeploymentsOk(); ok {
+ mappedItem["manage_deployments"] = []interface{}{flattenFabricsHealthStatusData(val)}
+ } else {
+ mappedItem["manage_deployments"] = []interface{}{}
+ }
+ if val, ok := data.GetLoadBalancingOk(); ok {
+ mappedItem["load_balancing"] = []interface{}{flattenFabricsHealthStatusData(val)}
+ } else {
+ mappedItem["load_balancing"] = []interface{}{}
+ }
+ if val, ok := data.GetAnypointMonitoringOk(); ok {
+ mappedItem["anypoint_monitoring"] = []interface{}{flattenFabricsHealthStatusData(val)}
+ } else {
+ mappedItem["anypoint_monitoring"] = []interface{}{}
+ }
+ if val, ok := data.GetExternalLogForwardingOk(); ok {
+ mappedItem["external_log_forwarding"] = []interface{}{flattenFabricsHealthStatusData(val)}
+ } else {
+ mappedItem["external_log_forwarding"] = []interface{}{}
+ }
+ if val, ok := data.GetApplianceOk(); ok {
+ mappedItem["appliance"] = []interface{}{flattenFabricsHealthStatusData(val)}
+ } else {
+ mappedItem["appliance"] = []interface{}{}
+ }
+ if val, ok := data.GetInfrastructureOk(); ok {
+ mappedItem["infrastructure"] = []interface{}{flattenFabricsHealthStatusData(val)}
+ } else {
+ mappedItem["infrastructure"] = []interface{}{}
+ }
+ if val, ok := data.GetPersistentGatewayOk(); ok {
+ mappedItem["persistent_gateway"] = []interface{}{flattenFabricsHealthStatusData(val)}
+ } else {
+ mappedItem["persistent_gateway"] = []interface{}{}
+ }
+
+ return mappedItem
+}
+
+func flattenFabricsHealthStatusData(data *rtf.FabricsHealthStatus) map[string]interface{} {
+ mappedItem := make(map[string]interface{})
+ if val, ok := data.GetHealthyOk(); ok {
+ mappedItem["healthy"] = *val
+ }
+ if val, ok := data.GetProbesOk(); ok {
+ mappedItem["probes"] = *val
+ }
+ if val, ok := data.GetUpdatedAtOk(); ok {
+ mappedItem["updated_at"] = *val
+ }
+ if val, ok := data.GetFailedProbesOk(); ok {
+ list := make([]interface{}, len(val))
+ for i, fhsfb := range val {
+ list[i] = flattenFabricsHealthStatusFailedProbesInner(&fhsfb)
+ }
+ mappedItem["failed_probes"] = list
+ }
+ return mappedItem
+}
+
+func flattenFabricsHealthStatusFailedProbesInner(data *rtf.FabricsHealthStatusFailedProbesInner) map[string]interface{} {
+ mappedItem := make(map[string]interface{})
+ if val, ok := data.GetNameOk(); ok {
+ mappedItem["name"] = *val
+ }
+ if val, ok := data.GetReasonOk(); ok {
+ mappedItem["reason"] = *val
+ }
+ if val, ok := data.GetLastTransitionAtOk(); ok {
+ mappedItem["last_transition_at"] = *val
+ }
+ return mappedItem
+}
+
+func setFabricsHealthResourceData(d *schema.ResourceData, data map[string]interface{}) error {
+ attributes := getFabricsHealthAttributes()
+ if data != nil {
+ for _, attr := range attributes {
+ if val, ok := data[attr]; ok {
+ if err := d.Set(attr, val); err != nil {
+ return fmt.Errorf("unable to set fabrics helm repo attribute %s\n\tdetails: %s", attr, err)
+ }
+ }
+ }
+ }
+ return nil
+}
+
// getFabricsHealthAttributes returns the list of schema attribute keys that
// are read back into terraform state for fabrics health metrics.
func getFabricsHealthAttributes() []string {
	return []string{
		"cluster_monitoring", "manage_deployments", "load_balancing",
		"anypoint_monitoring", "external_log_forwarding", "appliance",
		"infrastructure", "persistent_gateway",
	}
}
diff --git a/anypoint/data_source_fabrics_helm_repo.go b/anypoint/data_source_fabrics_helm_repo.go
new file mode 100644
index 0000000..a60a688
--- /dev/null
+++ b/anypoint/data_source_fabrics_helm_repo.go
@@ -0,0 +1,126 @@
+package anypoint
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "strconv"
+ "time"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ rtf "github.com/mulesoft-anypoint/anypoint-client-go/rtf"
+)
+
+func dataSourceFabricsHelmRepoProps() *schema.Resource {
+ return &schema.Resource{
+ ReadContext: dataSourceFabricsHelmRepoPropsRead,
+ Description: `
+ Reads ` + "`" + `Runtime Fabrics'` + "`" + ` Helm repository properties.
+ `,
+ Schema: map[string]*schema.Schema{
+ "org_id": {
+ Type: schema.TypeString,
+ Description: "The business group id",
+ Required: true,
+ },
+ "rtf_image_registry_endpoint": {
+ Type: schema.TypeString,
+ Description: "The runtime fabrics image registry endpoint",
+ Computed: true,
+ },
+ "rtf_image_registry_user": {
+ Type: schema.TypeString,
+ Description: "The user used to authenticate to the image registry",
+ Computed: true,
+ },
+ "rtf_image_registry_password": {
+ Type: schema.TypeString,
+ Description: "The password used to authenticate to the image registry",
+ Computed: true,
+ Sensitive: true,
+ },
+ },
+ }
+}
+
+func dataSourceFabricsHelmRepoPropsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+ var diags diag.Diagnostics
+ pco := m.(ProviderConfOutput)
+ orgid := d.Get("org_id").(string)
+ authctx := getFabricsAuthCtx(ctx, &pco)
+ //perform request
+ res, httpr, err := pco.rtfclient.DefaultApi.GetFabricsHelmRepoProps(authctx, orgid).Execute()
+ if err != nil {
+ var details string
+ if httpr != nil && httpr.StatusCode >= 400 {
+ defer httpr.Body.Close()
+ b, _ := io.ReadAll(httpr.Body)
+ details = string(b)
+ } else {
+ details = err.Error()
+ }
+ diags = append(diags, diag.Diagnostic{
+ Severity: diag.Error,
+ Summary: "Unable to get fabrics helm repository props",
+ Detail: details,
+ })
+ return diags
+ }
+ defer httpr.Body.Close()
+ //process data
+ data := flattenFabricsHelmRepoProps(res)
+ //save in data source schema
+ if err := setFabricsHelmRepoResourceData(d, data); err != nil {
+ diags := append(diags, diag.Diagnostic{
+ Severity: diag.Error,
+ Summary: "Unable to set fabrics helm repository props attributes",
+ Detail: err.Error(),
+ })
+ return diags
+ }
+
+ d.SetId(strconv.FormatInt(time.Now().Unix(), 10))
+
+ return diags
+}
+
+func flattenFabricsHelmRepoProps(props *rtf.FabricsHelmRepoProps) map[string]interface{} {
+ data := make(map[string]interface{})
+ if props == nil {
+ return data
+ }
+
+ if val, ok := props.GetRTF_IMAGE_REGISTRY_ENDPOINTOk(); ok {
+ data["rtf_image_registry_endpoint"] = *val
+ }
+ if val, ok := props.GetRTF_IMAGE_REGISTRY_USEROk(); ok {
+ data["rtf_image_registry_user"] = *val
+ }
+ if val, ok := props.GetRTF_IMAGE_REGISTRY_PASSWORDOk(); ok {
+ data["rtf_image_registry_password"] = *val
+ }
+
+ return data
+}
+
+func setFabricsHelmRepoResourceData(d *schema.ResourceData, data map[string]interface{}) error {
+ attributes := getFabricsHelmRepoAttributes()
+ if data != nil {
+ for _, attr := range attributes {
+ if val, ok := data[attr]; ok {
+ if err := d.Set(attr, val); err != nil {
+ return fmt.Errorf("unable to set fabrics helm repo attribute %s\n\tdetails: %s", attr, err)
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func getFabricsHelmRepoAttributes() []string {
+ attributes := [...]string{
+ "rtf_image_registry_endpoint", "rtf_image_registry_user", "rtf_image_registry_password",
+ }
+ return attributes[:]
+}
diff --git a/anypoint/data_source_fabrics_list.go b/anypoint/data_source_fabrics_list.go
new file mode 100644
index 0000000..1d8367e
--- /dev/null
+++ b/anypoint/data_source_fabrics_list.go
@@ -0,0 +1,232 @@
+package anypoint
+
+import (
+ "context"
+ "io"
+ "strconv"
+ "time"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ rtf "github.com/mulesoft-anypoint/anypoint-client-go/rtf"
+)
+
+func dataSourceFabricsCollection() *schema.Resource {
+ return &schema.Resource{
+ ReadContext: dataSourceAllFabricsRead,
+ Description: `
+ Reads all ` + "`" + `Runtime Fabrics'` + "`" + ` available in your org.
+ `,
+ Schema: map[string]*schema.Schema{
+ "org_id": {
+ Type: schema.TypeString,
+ Description: "The business group id",
+ Required: true,
+ },
+ "list": {
+ Type: schema.TypeList,
+ Computed: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "id": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The unique id of the fabrics instance in the platform.",
+ },
+ "org_id": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The organization id where the fabrics is hosted.",
+ },
+ "name": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The name of this fabrics instance.",
+ },
+ "region": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The region where fabrics instance is hosted.",
+ },
+ "vendor": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The vendor name of the kubernetes instance hosting fabrics.",
+ },
+ "vendor_metadata": {
+ Type: schema.TypeMap,
+ Computed: true,
+ Description: "The vendor metadata",
+ },
+ "version": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The version of fabrics.",
+ },
+ "status": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The status of the fabrics instance.",
+ },
+ "desired_version": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The desired version of fabrics.",
+ },
+ "available_upgrade_version": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The available upgrade version of fabrics.",
+ },
+ "created_at": {
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "The creation date of the fabrics instance.",
+ },
+ "upgrade": {
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "The upgrade status of the fabrics instance, if an upgrade is in progress.",
+ Elem: FabricsUpgradeDefinition,
+ },
+ "nodes": {
+ Type: schema.TypeList,
+ Computed: true,
+ Elem: NodeDefinition,
+ Description: "The list of fabrics nodes.",
+ },
+ "activation_data": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The activation data to use during installation of fabrics on the kubernetes cluster. Only available when instance is created and not activated yet.",
+ },
+ "seconds_since_heartbeat": {
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "The number of seconds since last heartbeat.",
+ },
+ "kubernetes_version": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The kubernetes version of the cluster.",
+ },
+ "namespace": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The namespace where runtime fabrics is installed.",
+ },
+ "license_expiry_date": {
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "The expiry date of the license (timestamp).",
+ },
+ "is_managed": {
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Whether this cluster is managed.",
+ },
+ "is_helm_managed": {
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Whether this cluster is managed by helm.",
+ },
+ "app_scoped_log_forwarding": {
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Whether app scoped log forwarding is active.",
+ },
+ "cluster_configuration_level": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The configuration level of the cluster (production or development).",
+ },
+ "features": {
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "The features of this cluster.",
+ Elem: FabricsFeaturesDefinition,
+ },
+ "ingress": {
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "The ingress configurations of this cluster.",
+ Elem: FabricsIngressDomainsDefinition,
+ },
+ },
+ },
+ },
+ "total": {
+ Type: schema.TypeInt,
+ Description: "The total number of available results",
+ Computed: true,
+ },
+ },
+ }
+}
+
+func dataSourceAllFabricsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+ var diags diag.Diagnostics
+ pco := m.(ProviderConfOutput)
+ orgid := d.Get("org_id").(string)
+ authctx := getFabricsAuthCtx(ctx, &pco)
+ //perform request
+ res, httpr, err := pco.rtfclient.DefaultApi.GetAllFabrics(authctx, orgid).Execute()
+ if err != nil {
+ var details string
+ if httpr != nil && httpr.StatusCode >= 400 {
+ defer httpr.Body.Close()
+ b, _ := io.ReadAll(httpr.Body)
+ details = string(b)
+ } else {
+ details = err.Error()
+ }
+ diags = append(diags, diag.Diagnostic{
+ Severity: diag.Error,
+ Summary: "Unable to get fabrics list",
+ Detail: details,
+ })
+ return diags
+ }
+ defer httpr.Body.Close()
+ //process data
+ list := flattenFabricsCollectionData(res)
+ //save in data source schema
+ if err := d.Set("list", list); err != nil {
+ diags = append(diags, diag.Diagnostic{
+ Severity: diag.Error,
+ Summary: "Unable to set fabrics list",
+ Detail: err.Error(),
+ })
+ return diags
+ }
+
+ if err := d.Set("total", len(list)); err != nil {
+ diags = append(diags, diag.Diagnostic{
+ Severity: diag.Error,
+ Summary: "Unable to set total number fabrics",
+ Detail: err.Error(),
+ })
+ return diags
+ }
+
+ d.SetId(strconv.FormatInt(time.Now().Unix(), 10))
+
+ return diags
+}
+
+/*
+* Transforms a set of runtime fabrics to the dataSourceFabricsCollection schema
+* @param fabricsCollection []rtf.Fabrics the list of fabrics
+* @return list of generic items
+ */
+func flattenFabricsCollectionData(fabricsCollection []rtf.Fabrics) []interface{} {
+ if len(fabricsCollection) == 0 {
+ return []interface{}{}
+ }
+
+ data := make([]interface{}, len(fabricsCollection))
+ for i, fabrics := range fabricsCollection {
+ data[i] = flattenFabricsData(&fabrics)
+ }
+ return data
+}
diff --git a/anypoint/data_source_roles.go b/anypoint/data_source_roles.go
index 3484ae3..274a443 100644
--- a/anypoint/data_source_roles.go
+++ b/anypoint/data_source_roles.go
@@ -15,7 +15,7 @@ func dataSourceRoles() *schema.Resource {
return &schema.Resource{
ReadContext: dataSourceRolesRead,
Description: `
- Reads all ` + "`" + `roles` + "`" + ` availabble.
+ Reads all ` + "`" + `roles` + "`" + ` available.
`,
Schema: map[string]*schema.Schema{
"params": {
diff --git a/anypoint/provider_clients.go b/anypoint/provider_clients.go
index db9cf04..1b504a6 100644
--- a/anypoint/provider_clients.go
+++ b/anypoint/provider_clients.go
@@ -7,6 +7,7 @@ import (
apim "github.com/mulesoft-anypoint/anypoint-client-go/apim"
"github.com/mulesoft-anypoint/anypoint-client-go/apim_policy"
apim_upstream "github.com/mulesoft-anypoint/anypoint-client-go/apim_upstream"
+ application_manager_v2 "github.com/mulesoft-anypoint/anypoint-client-go/application_manager_v2"
connected_app "github.com/mulesoft-anypoint/anypoint-client-go/connected_app"
dlb "github.com/mulesoft-anypoint/anypoint-client-go/dlb"
env "github.com/mulesoft-anypoint/anypoint-client-go/env"
@@ -15,6 +16,7 @@ import (
org "github.com/mulesoft-anypoint/anypoint-client-go/org"
role "github.com/mulesoft-anypoint/anypoint-client-go/role"
rolegroup "github.com/mulesoft-anypoint/anypoint-client-go/rolegroup"
+ rtf "github.com/mulesoft-anypoint/anypoint-client-go/rtf"
secretgroup "github.com/mulesoft-anypoint/anypoint-client-go/secretgroup"
secretgroup_certificate "github.com/mulesoft-anypoint/anypoint-client-go/secretgroup_certificate"
secretgroup_crl_distributor_configs "github.com/mulesoft-anypoint/anypoint-client-go/secretgroup_crl_distributor_configs"
@@ -62,6 +64,8 @@ type ProviderConfOutput struct {
sgcertificateclient *secretgroup_certificate.APIClient
sgtlscontextclient *secretgroup_tlscontext.APIClient
sgcrldistribcfgsclient *secretgroup_crl_distributor_configs.APIClient
+ rtfclient *rtf.APIClient
+ appmanagerclient *application_manager_v2.APIClient
}
func newProviderConfOutput(access_token string, server_index int) ProviderConfOutput {
@@ -94,6 +98,8 @@ func newProviderConfOutput(access_token string, server_index int) ProviderConfOu
sgcertificatecfg := secretgroup_certificate.NewConfiguration()
sgtlscontextcfg := secretgroup_tlscontext.NewConfiguration()
sgcrldistribcfgs_cfg := secretgroup_crl_distributor_configs.NewConfiguration()
+ rtf_cfg := rtf.NewConfiguration()
+ appmanager_cfg := application_manager_v2.NewConfiguration()
vpcclient := vpc.NewAPIClient(vpccfg)
vpnclient := vpn.NewAPIClient(vpncfg)
@@ -123,6 +129,8 @@ func newProviderConfOutput(access_token string, server_index int) ProviderConfOu
sgcertificateclient := secretgroup_certificate.NewAPIClient(sgcertificatecfg)
sgtlscontextclient := secretgroup_tlscontext.NewAPIClient(sgtlscontextcfg)
sgcrldistribcfgsclient := secretgroup_crl_distributor_configs.NewAPIClient(sgcrldistribcfgs_cfg)
+ rtfclient := rtf.NewAPIClient(rtf_cfg)
+ appmanagerclient := application_manager_v2.NewAPIClient(appmanager_cfg)
return ProviderConfOutput{
access_token: access_token,
@@ -155,5 +163,7 @@ func newProviderConfOutput(access_token string, server_index int) ProviderConfOu
sgcertificateclient: sgcertificateclient,
sgtlscontextclient: sgtlscontextclient,
sgcrldistribcfgsclient: sgcrldistribcfgsclient,
+ rtfclient: rtfclient,
+ appmanagerclient: appmanagerclient,
}
}
diff --git a/anypoint/provider_datasources.go b/anypoint/provider_datasources.go
index f6becc9..6f70718 100644
--- a/anypoint/provider_datasources.go
+++ b/anypoint/provider_datasources.go
@@ -52,4 +52,11 @@ var DATASOURCES_MAP = map[string]*schema.Resource{
"anypoint_secretgroup_crldistrib_cfgs": dataSourceSecretGroupCrlDistribCfgs(),
"anypoint_exchange_policy_templates": dataSourceExchangePolicyTemplates(),
"anypoint_exchange_policy_template": dataSourceExchangePolicyTemplate(),
+ "anypoint_fabrics_list": dataSourceFabricsCollection(),
+ "anypoint_fabrics": dataSourceFabrics(),
+ "anypoint_fabrics_associations": dataSourceFabricsAssociations(),
+ "anypoint_fabrics_helm_repo": dataSourceFabricsHelmRepoProps(),
+ "anypoint_fabrics_health": dataSourceFabricsHealth(),
+ "anypoint_app_deployment_v2": dataSourceAppDeploymentV2(),
+ "anypoint_app_deployments_v2": dataSourceAppDeploymentsV2(),
}
diff --git a/anypoint/provider_resources.go b/anypoint/provider_resources.go
index a65d432..62defea 100644
--- a/anypoint/provider_resources.go
+++ b/anypoint/provider_resources.go
@@ -38,4 +38,8 @@ var RESOURCES_MAP = map[string]*schema.Resource{
"anypoint_secretgroup_tlscontext_mule": resourceSecretGroupTlsContextMule(),
"anypoint_secretgroup_tlscontext_securityfabric": resourceSecretGroupTlsContextSF(),
"anypoint_secretgroup_crldistrib_cfgs": resourceSecretGroupCrlDistribCfgs(),
+ "anypoint_fabrics": resourceFabrics(),
+ "anypoint_fabrics_associations": resourceFabricsAssociations(),
+ "anypoint_cloudhub2_shared_space_deployment": resourceCloudhub2SharedSpaceDeployment(),
+ "anypoint_rtf_deployment": resourceRTFDeployment(),
}
diff --git a/anypoint/resource_apim_policy_message_logging.go b/anypoint/resource_apim_policy_message_logging.go
index 5996d10..d85006b 100644
--- a/anypoint/resource_apim_policy_message_logging.go
+++ b/anypoint/resource_apim_policy_message_logging.go
@@ -433,7 +433,7 @@ func disableApimInstancePolicyMessageLogging(ctx context.Context, d *schema.Reso
return diags
}
-func flattenApimPolicyMessageLoggingCfg(d *schema.ResourceData, policy *apim_policy.ApimPolicy) map[string]interface{} {
+func flattenApimPolicyMessageLoggingCfg(_ *schema.ResourceData, policy *apim_policy.ApimPolicy) map[string]interface{} {
data := make(map[string]interface{})
cfg := policy.GetConfigurationData()
logging_cfg := cfg["loggingConfiguration"].([]interface{})
diff --git a/anypoint/resource_cloudhub2_shared_space_deployment.go b/anypoint/resource_cloudhub2_shared_space_deployment.go
new file mode 100644
index 0000000..75f3297
--- /dev/null
+++ b/anypoint/resource_cloudhub2_shared_space_deployment.go
@@ -0,0 +1,874 @@
+package anypoint
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/hashicorp/go-cty/cty"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+ application_manager_v2 "github.com/mulesoft-anypoint/anypoint-client-go/application_manager_v2"
+)
+
+var DeplApplicationConfigLoggingC2SSDefinition = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "artifact_name": {
+ Type: schema.TypeString,
+ Description: "The application name.",
+ Computed: true,
+ },
+ "scope_logging_configurations": {
+ Type: schema.TypeList,
+ Description: "Additional log levels and categories to include in logs.",
+ Optional: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "scope": {
+ Type: schema.TypeString,
+ Description: "The logging package scope",
+ Required: true,
+ },
+ "log_level": {
+ Type: schema.TypeString,
+ Description: "The application log level: INFO / DEBUG / WARNING / ERROR / FATAL",
+ Required: true,
+ ValidateDiagFunc: validation.ToDiagFunc(
+ validation.StringInSlice([]string{"INFO", "DEBUG", "WARNING", "ERROR", "FATAL"}, false),
+ ),
+ },
+ },
+ },
+ },
+ },
+}
+
+var DeplApplicationConfigPropsC2SSDefinition = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "application_name": {
+ Type: schema.TypeString,
+ Description: "The application name",
+ Computed: true,
+ },
+ "properties": {
+ Type: schema.TypeMap,
+ Description: "The mule application properties.",
+ Optional: true,
+ DefaultFunc: func() (interface{}, error) { return make(map[string]string), nil },
+ },
+ "secure_properties": {
+ Type: schema.TypeMap,
+ Description: "The mule application secured properties.",
+ Optional: true,
+ DefaultFunc: func() (interface{}, error) { return make(map[string]string), nil },
+ },
+ },
+}
+
+var DeplApplicationConfigC2SSDefinition = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "mule_agent_app_props_service": {
+ Type: schema.TypeList,
+ MaxItems: 1,
+ Description: "The mule app properties",
+ Elem: DeplApplicationConfigPropsC2SSDefinition,
+ Required: true,
+ },
+ "mule_agent_logging_service": {
+ Type: schema.TypeList,
+ MaxItems: 1,
+ Description: "The mule app logging props",
+ Elem: DeplApplicationConfigLoggingC2SSDefinition,
+ Optional: true,
+ },
+ "mule_agent_scheduling_service": {
+ Type: schema.TypeList,
+ Description: "The mule app scheduling",
+ Elem: DeplApplicationConfigSchedulingReadOnlyDefinition,
+ Computed: true,
+ },
+ },
+}
+
+var DeplApplicationRefC2SSDefinition = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "group_id": {
+ Type: schema.TypeString,
+ Required: true,
+ Description: "The groupId of the application.",
+ },
+ "artifact_id": {
+ Type: schema.TypeString,
+ Required: true,
+ Description: "The artifactId of the application.",
+ },
+ "version": {
+ Type: schema.TypeString,
+ Required: true,
+ Description: "The version of the application.",
+ },
+ "packaging": {
+ Type: schema.TypeString,
+ Required: true,
+ ValidateDiagFunc: validation.ToDiagFunc(
+ validation.StringInSlice([]string{"jar"}, false),
+ ),
+ Description: "The packaging of the application. Only 'jar' is supported.",
+ },
+ },
+}
+
+var DeplApplicationC2SSDefinition = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "status": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The status of the application.",
+ },
+ "desired_state": {
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "STARTED",
+ Description: "The desired state of the application.",
+ ValidateDiagFunc: validation.ToDiagFunc(
+ validation.StringInSlice(
+ []string{
+ "PARTIALLY_STARTED", "DEPLOYMENT_FAILED", "STARTING", "STARTED", "STOPPING",
+ "STOPPED", "UNDEPLOYING", "UNDEPLOYED", "UPDATED", "APPLIED", "APPLYING", "FAILED", "DELETED",
+ },
+ false,
+ ),
+ ),
+ },
+ "ref": {
+ Type: schema.TypeList,
+ MaxItems: 1,
+ Required: true,
+ Description: `
+ The reference to the artifact on Exchange that is to be deployed on Cloudhub 2.0.
+ Please ensure the application's artifact is deployed on Exchange before using this resource on Cloudhub 2.0.
+ `,
+ Elem: DeplApplicationRefC2SSDefinition,
+ },
+ "configuration": {
+ Type: schema.TypeList,
+ MaxItems: 1,
+ Required: true,
+ Description: "The configuration of the application.",
+ Elem: DeplApplicationConfigC2SSDefinition,
+ },
+ "vcores": {
+ Type: schema.TypeFloat,
+ Required: true,
+ Description: "The allocated virtual cores. Acceptable Values are: 0.1 / 0.2 / 0.5 / 1 / 1.5 / 2 / 2.5 / 3 / 3.5 / 4",
+ ValidateDiagFunc: VCoresValidatorDiag,
+ },
+ "object_store_v2_enabled": {
+ Type: schema.TypeBool,
+ Optional: true,
+ Default: false,
+ Description: "Whether object store v2 is enabled.",
+ },
+ },
+}
+
+var DeplTargetDeplSettHttpC2SSDefinition = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "inbound_public_url": {
+ Type: schema.TypeString,
+ Description: "The inbound public url. Setting the public url is disabled for shared-space.",
+ Computed: true,
+ },
+ "inbound_path_rewrite": {
+ Type: schema.TypeString,
+ Description: "The inbound path rewrite. This option is disabled for shared-space.",
+ Computed: true,
+ },
+ "inbound_last_mile_security": {
+ Type: schema.TypeBool,
+ Description: "Last-mile security means that the connection between ingress and the actual Mule app will be HTTPS.",
+ Optional: true,
+ Default: false,
+ },
+ "inbound_forward_ssl_session": {
+ Type: schema.TypeBool,
+ Description: "Whether to forward the ssl session. This option is disabled for shared-space.",
+ Computed: true,
+ },
+ "inbound_internal_url": {
+ Type: schema.TypeString,
+ Description: "The inbound internal url.",
+ Computed: true,
+ },
+ "inbound_unique_id": {
+ Type: schema.TypeString,
+ Description: "The inbound unique id.",
+ Computed: true,
+ },
+ },
+}
+
+var DeplTargetDeplSettRuntimeC2SSDefinition = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "version": {
+ Type: schema.TypeString,
+ Description: `
+ On deployment operations it can be set to:
+ - a full image version with tag (i.e "4.6.0:40e-java17"),
+ - a base version with a partial tag not indicating the java version (i.e. "4.6.0:40")
+ - or only a base version (i.e. "4.6.0").
+ Defaults to the latest image version.
+ This field has precedence over the legacy 'target.deploymentSettings.runtimeVersion'.
+ Learn more about Mule runtime release notes [here](https://docs.mulesoft.com/release-notes/runtime-fabric/runtime-fabric-runtimes-release-notes)
+ `,
+ Required: true,
+ },
+ "release_channel": {
+ Type: schema.TypeString,
+ Description: `
+ On deployment operations it can be set to one of:
+ - "LTS"
+ - "EDGE"
+ - "LEGACY".
+ Defaults to "EDGE". This field has precedence over the legacy 'target.deploymentSettings.runtimeReleaseChannel'.
+ Learn more on release channels [here](https://docs.mulesoft.com/release-notes/mule-runtime/lts-edge-release-cadence).
+ `,
+ Optional: true,
+ Default: "EDGE",
+ ValidateDiagFunc: validation.ToDiagFunc(
+ validation.StringInSlice([]string{"LTS", "EDGE", "LEGACY"}, false),
+ ),
+ },
+ "java": {
+ Type: schema.TypeString,
+ Description: `
+ On deployment operations it can be set to one of:
+ - "8"
+ - "17"
+ Defaults to "8".
+ Learn more about Java support [here](https://docs.mulesoft.com/general/java-support).
+ `,
+ Optional: true,
+ Default: "8",
+ ValidateDiagFunc: validation.ToDiagFunc(
+ validation.StringInSlice([]string{"8", "17"}, false),
+ ),
+ },
+ },
+}
+
+var DeplTargetDeplSettAutoscalingC2SSDefinition = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "enabled": {
+ Type: schema.TypeBool,
+ Description: "Enables or disables the Autoscaling feature. The possible values are: true or false.",
+ Required: true,
+ },
+ "min_replicas": {
+ Type: schema.TypeInt,
+ Description: "Set the minimum amount of replicas for your deployment. The minimum accepted value is 1. The maximum is 3.",
+ Optional: true,
+ Default: 1,
+ ValidateDiagFunc: validation.ToDiagFunc(validation.IntBetween(1, 3)),
+ },
+ "max_replicas": {
+ Type: schema.TypeInt,
+ Description: "Set the maximum amount of replicas your application can scale to. The minimum accepted value is 2. The maximum is 32.",
+ Optional: true,
+ Default: 2,
+ ValidateDiagFunc: validation.ToDiagFunc(validation.IntBetween(2, 32)),
+ },
+ },
+}
+
+var DeplTargetDeploymentSettingsC2SSDefinition = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "clustered": {
+ Type: schema.TypeBool,
+ Description: "Whether the application is deployed in clustered mode.",
+ Optional: true,
+ Default: false,
+ },
+ // "enforce_deploying_replicas_across_nodes": {
+ // Type: schema.TypeBool,
+ // Description: "If true, forces the deployment of replicas across the RTF cluster. This option only available for Runtime Fabrics.",
+ // Computed: true,
+ // },
+ "http": {
+ Type: schema.TypeList,
+ Description: "The details about http inbound or outbound configuration",
+ Optional: true,
+ MaxItems: 1,
+ DefaultFunc: func() (interface{}, error) {
+ dict := make(map[string]interface{})
+ dict["inbound_last_mile_security"] = false
+ return []interface{}{dict}, nil
+ },
+ Elem: DeplTargetDeplSettHttpC2SSDefinition,
+ },
+ "jvm_args": {
+ Type: schema.TypeString,
+ Description: "The java virtual machine arguments",
+ Optional: true,
+ Default: "",
+ },
+ "runtime": {
+ Type: schema.TypeList,
+ Description: "The Mule app runtime version info.",
+ Optional: true,
+ MaxItems: 1,
+ Elem: DeplTargetDeplSettRuntimeC2SSDefinition,
+ },
+ "autoscaling": {
+ Type: schema.TypeList,
+ Description: `
+ Use this object to provide CPU Based Horizontal Autoscaling configuration on deployment and redeployment operations. This object is optional.
+ If Autoscaling is disabled and the fields "minReplicas" and "maxReplicas" are provided, they must match the value of "target.replicas" field.
+ Learn more about Autoscaling [here](https://docs.mulesoft.com/cloudhub-2/ch2-configure-horizontal-autoscaling).
+ `,
+ Optional: true,
+ MaxItems: 1,
+ DefaultFunc: func() (interface{}, error) {
+ dict := make(map[string]interface{})
+ dict["enabled"] = false
+ return []interface{}{dict}, nil
+ },
+ Elem: DeplTargetDeplSettAutoscalingC2SSDefinition,
+ },
+ "update_strategy": {
+ Type: schema.TypeString,
+ Description: "The mule app deployment update strategy: rolling or recreate",
+ Optional: true,
+ Default: "rolling",
+ ValidateDiagFunc: validation.ToDiagFunc(
+ validation.StringInSlice([]string{"rolling", "recreate"}, false),
+ ),
+ },
+ "resources": {
+ Type: schema.TypeList,
+ Description: "The mule app allocated resources",
+ Elem: DeplTargetDeplSettResourcesReadOnlyDefinition,
+ Computed: true,
+ },
+ "disable_am_log_forwarding": {
+ Type: schema.TypeBool,
+ Description: "Whether log forwarding is disabled.",
+ Optional: true,
+ Default: false,
+ },
+ "persistent_object_store": {
+ Type: schema.TypeBool,
+ Description: "Whether persistent object store is enabled. Only for RTF",
+ Computed: true,
+ },
+ "anypoint_monitoring_scope": {
+ Type: schema.TypeString,
+ Description: "The anypoint monitoring scope",
+ Computed: true,
+ },
+ "sidecars": {
+ Type: schema.TypeList,
+ Description: "The mule app sidecars.",
+ Elem: DeplTargetDeplSettSidecarsReadOnlyDefinition,
+ Computed: true,
+ },
+ "disable_external_log_forwarding": {
+ Type: schema.TypeBool,
+ Description: "Whether the log forwarding is disabled.",
+ Optional: true,
+ Default: false,
+ },
+ "tracing_enabled": {
+ Type: schema.TypeBool,
+ Description: "Whether the log tracing is enabled.",
+ Computed: true,
+ },
+ "generate_default_public_url": {
+ Type: schema.TypeBool,
+ Description: "Whether default public url should be generated.",
+ Optional: true,
+ Default: false,
+ },
+ },
+}
+
+var DeplTargetC2SSDefinition = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "provider": {
+ Type: schema.TypeString,
+ Description: "The cloud provider the target belongs to.",
+ Optional: true,
+ Default: "MC",
+ ForceNew: true,
+ ValidateDiagFunc: validation.ToDiagFunc(
+ validation.StringInSlice([]string{"MC"}, false),
+ ),
+ },
+ "target_id": {
+ Type: schema.TypeString,
+ Description: `The unique identifier of the target within Cloudhub 2.0.
+ Checkout the [documentation](https://docs.mulesoft.com/cloudhub-2/ch2-architecture#regions-and-dns-records) for more info
+ `,
+ Required: true,
+ ForceNew: true,
+ ValidateDiagFunc: validation.ToDiagFunc(
+ validation.StringInSlice(
+ []string{
+ "cloudhub-us-east-1", "cloudhub-us-east-2",
+ "cloudhub-us-west-1", "cloudhub-us-west-2",
+ "cloudhub-ca-central-1", "cloudhub-sa-east-1",
+ "cloudhub-ap-southeast-1", "cloudhub-ap-southeast-2",
+ "cloudhub-ap-northeast-1", "cloudhub-eu-west-1",
+ "cloudhub-eu-central-1", "cloudhub-eu-west-2",
+ },
+ false,
+ ),
+ ),
+ },
+ "deployment_settings": {
+ Type: schema.TypeList,
+ MaxItems: 1,
+ Description: "The settings of the target for the deployment to perform.",
+ Required: true,
+ Elem: DeplTargetDeploymentSettingsC2SSDefinition,
+ },
+ "replicas": {
+ Type: schema.TypeInt,
+ Description: "The number of replicas. Default is 1.",
+ Optional: true,
+ Default: 1,
+ },
+ },
+}
+
+func resourceCloudhub2SharedSpaceDeployment() *schema.Resource {
+ return &schema.Resource{
+ CreateContext: resourceCloudhub2SharedSpaceDeploymentCreate,
+ ReadContext: resourceCloudhub2SharedSpaceDeploymentRead,
+ UpdateContext: resourceCloudhub2SharedSpaceDeploymentUpdate,
+ DeleteContext: resourceCloudhub2SharedSpaceDeploymentDelete,
+ Description: `
+ Creates and manages a ` + "`" + `deployment` + "`" + ` of a mule app on Cloudhub v2 Shared-Space only.
+ `,
+ Schema: map[string]*schema.Schema{
+ "id": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The unique id of the mule app deployment in the platform.",
+ },
+ "org_id": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ Description: "The organization where the mule app is deployed.",
+ },
+ "env_id": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ Description: "The environment where mule app is deployed.",
+ },
+ "name": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ Description: "The name of the deployed mule app.",
+ },
+ "creation_date": {
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "The creation date of the mule app.",
+ },
+ "last_modified_date": {
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "The last modification date of the mule app.",
+ },
+ "desired_version": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The deployment desired version of the mule app.",
+ },
+ "replicas": {
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Data of the mule app replicas",
+ Elem: ReplicasReadOnlyDefinition,
+ },
+ "status": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The status of the mule app deployment.",
+ },
+ "application": {
+ Type: schema.TypeList,
+ MaxItems: 1,
+ Required: true,
+ Description: "The details of the application to deploy",
+ Elem: DeplApplicationC2SSDefinition,
+ },
+ "target": {
+ Type: schema.TypeList,
+ MaxItems: 1,
+ Required: true,
+ Description: "The details of the target to perform the deployment on.",
+ Elem: DeplTargetC2SSDefinition,
+ },
+ "last_successful_version": {
+ Type: schema.TypeString,
+ Description: "The last successfully deployed version",
+ Computed: true,
+ },
+ },
+ Importer: &schema.ResourceImporter{
+ StateContext: schema.ImportStatePassthroughContext,
+ },
+ }
+}
+
+// resourceCloudhub2SharedSpaceDeploymentCreate posts a new mule app deployment
+// to CloudHub 2.0 shared-space, stores the returned deployment id, and then
+// delegates to the read function to populate the rest of the state.
+func resourceCloudhub2SharedSpaceDeploymentCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	var diags diag.Diagnostics
+	pco := m.(ProviderConfOutput)
+	name := d.Get("name").(string)
+	orgid := d.Get("org_id").(string)
+	envid := d.Get("env_id").(string)
+	authctx := getAppDeploymentV2AuthCtx(ctx, &pco)
+	body := newCloudhub2SharedSpaceDeploymentBody(d)
+	//Execute post deployment
+	res, httpr, err := pco.appmanagerclient.DefaultApi.PostDeployment(authctx, orgid, envid).DeploymentRequestBody(*body).Execute()
+	if err != nil {
+		var details string
+		// prefer the API error payload when an HTTP response is available
+		if httpr != nil && httpr.StatusCode >= 400 {
+			defer httpr.Body.Close()
+			b, _ := io.ReadAll(httpr.Body)
+			details = string(b)
+		} else {
+			details = err.Error()
+		}
+		// plain assignment: ":=" here would shadow the outer diags slice
+		diags = append(diags, diag.Diagnostic{
+			Severity: diag.Error,
+			Summary:  "Unable to create " + name + " deployment for cloudhub 2.0 shared-space.",
+			Detail:   details,
+		})
+		return diags
+	}
+	defer httpr.Body.Close()
+	d.SetId(res.GetId())
+	return resourceCloudhub2SharedSpaceDeploymentRead(ctx, d, m)
+}
+
+// resourceCloudhub2SharedSpaceDeploymentRead fetches the deployment from the
+// platform and maps it into terraform state. It also supports composed ids
+// ({org_id}/{env_id}/{deployment_id}) so the resource can be imported.
+func resourceCloudhub2SharedSpaceDeploymentRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	var diags diag.Diagnostics
+	pco := m.(ProviderConfOutput)
+	id := d.Id()
+	orgid := d.Get("org_id").(string)
+	envid := d.Get("env_id").(string)
+	if isComposedResourceId(id) {
+		// import case: org/env/id are all encoded in the resource id
+		orgid, envid, id = decomposeCloudhub2SharedSpaceDeploymentId(d)
+	}
+	authctx := getAppDeploymentV2AuthCtx(ctx, &pco)
+	//perform request
+	res, httpr, err := pco.appmanagerclient.DefaultApi.GetDeploymentById(authctx, orgid, envid, id).Execute()
+	if err != nil {
+		var details string
+		if httpr != nil && httpr.StatusCode >= 400 {
+			defer httpr.Body.Close()
+			b, _ := io.ReadAll(httpr.Body)
+			details = string(b)
+		} else {
+			details = err.Error()
+		}
+		// plain assignment: ":=" here would shadow the outer diags slice
+		diags = append(diags, diag.Diagnostic{
+			Severity: diag.Error,
+			Summary:  "Unable to read cloudhub2 deployment " + id + " on shared-space.",
+			Detail:   details,
+		})
+		return diags
+	}
+	defer httpr.Body.Close()
+
+	//process data
+	data := flattenAppDeploymentV2(res)
+	if err := setAppDeploymentV2AttributesToResourceData(d, data); err != nil {
+		diags = append(diags, diag.Diagnostic{
+			Severity: diag.Error,
+			Summary:  "Unable to set App Deployment details attributes",
+			Detail:   err.Error(),
+		})
+		return diags
+	}
+	// setting all params required for reading in case of import
+	d.SetId(res.GetId())
+	d.Set("org_id", orgid)
+	d.Set("env_id", envid)
+
+	return diags
+}
+
+// resourceCloudhub2SharedSpaceDeploymentUpdate patches the deployment when one
+// of the updatable attributes (application, target) changed; otherwise it is a
+// no-op. On success the state is refreshed through the read function.
+func resourceCloudhub2SharedSpaceDeploymentUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	var diags diag.Diagnostics
+	// short-circuit when none of the watched attributes changed
+	if !d.HasChanges(getCloudhub2SharedSpaceDeploymentUpdatableAttributes()...) {
+		return diags
+	}
+	pco := m.(ProviderConfOutput)
+	id := d.Id()
+	orgid := d.Get("org_id").(string)
+	envid := d.Get("env_id").(string)
+	name := d.Get("name").(string)
+	authctx := getAppDeploymentV2AuthCtx(ctx, &pco)
+	body := newCloudhub2SharedSpaceDeploymentBody(d)
+	_, httpr, err := pco.appmanagerclient.DefaultApi.PatchDeployment(authctx, orgid, envid, id).DeploymentRequestBody(*body).Execute()
+	if err != nil {
+		var details string
+		if httpr != nil && httpr.StatusCode >= 400 {
+			defer httpr.Body.Close()
+			b, _ := io.ReadAll(httpr.Body)
+			details = string(b)
+		} else {
+			details = err.Error()
+		}
+		// plain assignment: ":=" here would shadow the outer diags slice
+		diags = append(diags, diag.Diagnostic{
+			Severity: diag.Error,
+			Summary:  "Unable to update deployment " + name + " on cloudhub 2.0 shared-space.",
+			Detail:   details,
+		})
+		return diags
+	}
+	defer httpr.Body.Close()
+	return resourceCloudhub2SharedSpaceDeploymentRead(ctx, d, m)
+}
+
+// resourceCloudhub2SharedSpaceDeploymentDelete deletes the deployment from
+// CloudHub 2.0 shared-space and clears the resource id.
+func resourceCloudhub2SharedSpaceDeploymentDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	var diags diag.Diagnostics
+	pco := m.(ProviderConfOutput)
+	id := d.Id()
+	orgid := d.Get("org_id").(string)
+	envid := d.Get("env_id").(string)
+	name := d.Get("name").(string)
+	authctx := getAppDeploymentV2AuthCtx(ctx, &pco)
+	httpr, err := pco.appmanagerclient.DefaultApi.DeleteDeployment(authctx, orgid, envid, id).Execute()
+	if err != nil {
+		var details string
+		if httpr != nil && httpr.StatusCode >= 400 {
+			defer httpr.Body.Close()
+			b, _ := io.ReadAll(httpr.Body)
+			details = string(b)
+		} else {
+			details = err.Error()
+		}
+		// plain assignment: ":=" here would shadow the outer diags slice
+		diags = append(diags, diag.Diagnostic{
+			Severity: diag.Error,
+			Summary:  "Unable to delete deployment " + name + " on cloudhub 2.0 shared-space.",
+			Detail:   details,
+		})
+		return diags
+	}
+	defer httpr.Body.Close()
+	// d.SetId("") is automatically called assuming delete returns no errors, but
+	// it is added here for explicitness.
+	d.SetId("")
+	return diags
+}
+
+// Prepares Deployment Post Body out of resource data input
+func newCloudhub2SharedSpaceDeploymentBody(d *schema.ResourceData) *application_manager_v2.DeploymentRequestBody {
+ body := application_manager_v2.NewDeploymentRequestBody()
+ // -- Parsing Application
+ app_list_d := d.Get("application").([]interface{})
+ app_d := app_list_d[0].(map[string]interface{})
+ application := newCloudhub2SharedSpaceDeploymentApplication(app_d)
+ // -- Parsing Target
+ target_list_d := d.Get("target").([]interface{})
+ target_d := target_list_d[0].(map[string]interface{})
+ target := newCloudhub2SharedSpaceDeploymentTarget(target_d)
+ //Set Body Data
+ body.SetName(d.Get("name").(string))
+ body.SetApplication(*application)
+ body.SetTarget(*target)
+
+ return body
+}
+
+// Prepares Application object out of map input
+func newCloudhub2SharedSpaceDeploymentApplication(app_d map[string]interface{}) *application_manager_v2.Application {
+ ref_list_d := app_d["ref"].([]interface{})
+ ref_d := ref_list_d[0].(map[string]interface{})
+ // Ref
+ ref := newCloudhub2SharedSpaceDeploymentRef(ref_d)
+ //Parse Configuration
+ configuration_list_d := app_d["configuration"].([]interface{})
+ configuration_d := configuration_list_d[0].(map[string]interface{})
+ configuration := newCloudhub2SharedSpaceDeploymentConfiguration(configuration_d)
+ //VCores
+ vcores_d := app_d["vcores"].(float64)
+ //Object Store V2
+ object_store_v2_enabled_d := app_d["object_store_v2_enabled"].(bool)
+ //Application Integration
+ integrations := application_manager_v2.NewApplicationIntegrations()
+ object_store_v2 := application_manager_v2.NewObjectStoreV2()
+ object_store_v2.SetEnabled(object_store_v2_enabled_d)
+ services := application_manager_v2.NewServices()
+ services.SetObjectStoreV2(*object_store_v2)
+ integrations.SetServices(*services)
+ //Application
+ application := application_manager_v2.NewApplication()
+ application.SetDesiredState(app_d["desired_state"].(string))
+ application.SetConfiguration(*configuration)
+ application.SetIntegrations(*integrations)
+ application.SetRef(*ref)
+ application.SetVCores(float32(vcores_d))
+
+ return application
+}
+
+// Prepares Target object out of map input
+func newCloudhub2SharedSpaceDeploymentTarget(target_d map[string]interface{}) *application_manager_v2.Target {
+ deployment_settings_list_d := target_d["deployment_settings"].([]interface{})
+ deployment_settings_d := deployment_settings_list_d[0].(map[string]interface{})
+ deployment_settings := newCloudhub2SharedSpaceDeploymentDeploymentSettings(deployment_settings_d)
+ //Prepare Target data
+ target := application_manager_v2.NewTarget()
+ target.SetProvider(target_d["provider"].(string))
+ target.SetTargetId(target_d["target_id"].(string))
+ target.SetDeploymentSettings(*deployment_settings)
+ target.SetReplicas(int32(target_d["replicas"].(int)))
+
+ return target
+}
+
+// Prepares Ref Object out of map input
+func newCloudhub2SharedSpaceDeploymentRef(ref_d map[string]interface{}) *application_manager_v2.Ref {
+ ref := application_manager_v2.NewRef()
+ ref.SetGroupId(ref_d["group_id"].(string))
+ ref.SetArtifactId(ref_d["artifact_id"].(string))
+ ref.SetVersion(ref_d["version"].(string))
+ ref.SetPackaging(ref_d["packaging"].(string))
+ return ref
+}
+
+// Prepares Application Configuration Object out of map input
+func newCloudhub2SharedSpaceDeploymentConfiguration(configuration_d map[string]interface{}) *application_manager_v2.AppConfiguration {
+ //Mule Agent App Properties Service
+ mule_agent_app_props_service_list_d := configuration_d["mule_agent_app_props_service"].([]interface{})
+ mule_agent_app_props_service_d := mule_agent_app_props_service_list_d[0].(map[string]interface{})
+ mule_agent_app_props_service_properties := mule_agent_app_props_service_d["properties"].(map[string]interface{})
+ mule_agent_app_props_service_secure_properties := mule_agent_app_props_service_d["secure_properties"].(map[string]interface{})
+ mule_agent_app_props_service := application_manager_v2.NewMuleAgentAppPropService()
+ mule_agent_app_props_service.SetProperties(mule_agent_app_props_service_properties)
+ mule_agent_app_props_service.SetSecureProperties(mule_agent_app_props_service_secure_properties)
+ mule_agent_logging_service_list_d := configuration_d["mule_agent_logging_service"].([]interface{})
+ mule_agent_logging_service_d := mule_agent_logging_service_list_d[0].(map[string]interface{})
+ //Scope logging configuration
+ scope_logging_configurations_list_d := mule_agent_logging_service_d["scope_logging_configurations"].([]interface{})
+ scope_logging_configurations := make([]application_manager_v2.ScopeLoggingConfiguration, len(scope_logging_configurations_list_d))
+ for i, item := range scope_logging_configurations_list_d {
+ data := item.(map[string]interface{})
+ conf := application_manager_v2.NewScopeLoggingConfiguration()
+ conf.SetScope(data["scope"].(string))
+ conf.SetLogLevel(data["log_level"].(string))
+ scope_logging_configurations[i] = *conf
+ }
+ //Mule Agent Logging Service
+ mule_agent_logging_service := application_manager_v2.NewMuleAgentLoggingService()
+ mule_agent_logging_service.SetScopeLoggingConfigurations(scope_logging_configurations)
+ configuration := application_manager_v2.NewAppConfiguration()
+ configuration.SetMuleAgentApplicationPropertiesService(*mule_agent_app_props_service)
+ configuration.SetMuleAgentLoggingService(*mule_agent_logging_service)
+
+ return configuration
+}
+
+// Prepares DeploymentSettings object out of map input
+func newCloudhub2SharedSpaceDeploymentDeploymentSettings(deployment_settings_d map[string]interface{}) *application_manager_v2.DeploymentSettings {
+ //http
+ http := newCloudhub2SharedSpaceDeploymentHttp(deployment_settings_d)
+ //runtime
+ runtime := newCloudhub2SharedSpaceDeploymentRuntime(deployment_settings_d)
+ //autoscaling
+ autoscaling := newCloudhub2SharedSpaceDeploymentAutoscaling(deployment_settings_d)
+ //Prepare JVM Args data
+ jvm := application_manager_v2.NewJvm()
+ jvm.SetArgs(deployment_settings_d["jvm_args"].(string))
+ deployment_settings := application_manager_v2.NewDeploymentSettings()
+ deployment_settings.SetClustered(deployment_settings_d["clustered"].(bool))
+ deployment_settings.SetHttp(*http)
+ deployment_settings.SetJvm(*jvm)
+ deployment_settings.SetUpdateStrategy(deployment_settings_d["update_strategy"].(string))
+ deployment_settings.SetDisableAmLogForwarding(deployment_settings_d["disable_am_log_forwarding"].(bool))
+ // deployment_settings.SetPersistentObjectStore(deployment_settings_d["persistent_object_store"].(bool))
+ deployment_settings.SetDisableExternalLogForwarding(deployment_settings_d["disable_external_log_forwarding"].(bool))
+ deployment_settings.SetGenerateDefaultPublicUrl(deployment_settings_d["generate_default_public_url"].(bool))
+ deployment_settings.SetRuntime(*runtime)
+ deployment_settings.SetAutoscaling(*autoscaling)
+
+ return deployment_settings
+}
+
+// Prepares Runtime object out of map input
+func newCloudhub2SharedSpaceDeploymentRuntime(deployment_settings_d map[string]interface{}) *application_manager_v2.Runtime {
+ runtime := application_manager_v2.NewRuntime()
+ if val, ok := deployment_settings_d["runtime"]; ok {
+ runtime_list_d := val.([]interface{})
+ if len(runtime_list_d) > 0 {
+ runtime_d := runtime_list_d[0].(map[string]interface{})
+ runtime.SetVersion(runtime_d["version"].(string))
+ runtime.SetReleaseChannel(runtime_d["release_channel"].(string))
+ runtime.SetJava(runtime_d["java"].(string))
+ }
+ }
+ return runtime
+}
+
+// Prepares Http object out of map input
+func newCloudhub2SharedSpaceDeploymentHttp(deployment_settings_d map[string]interface{}) *application_manager_v2.Http {
+ http_inbound := application_manager_v2.NewHttpInbound()
+ http := application_manager_v2.NewHttp()
+ if val, ok := deployment_settings_d["http"]; ok {
+ http_list_d := val.([]interface{})
+ if len(http_list_d) > 0 {
+ http_d := http_list_d[0].(map[string]interface{})
+ http_inbound.SetLastMileSecurity(http_d["inbound_last_mile_security"].(bool))
+ http.SetInbound(*http_inbound)
+ }
+ }
+ return http
+}
+
+// newCloudhub2SharedSpaceDeploymentAutoscaling builds the Autoscaling model
+// from the optional "autoscaling" sub-list of the deployment settings. When
+// the list is absent or empty, an unpopulated Autoscaling is returned.
+func newCloudhub2SharedSpaceDeploymentAutoscaling(deployment_settings_d map[string]interface{}) *application_manager_v2.Autoscaling {
+	autoscaling := application_manager_v2.NewAutoscaling()
+	raw, ok := deployment_settings_d["autoscaling"]
+	if !ok {
+		return autoscaling
+	}
+	if items := raw.([]interface{}); len(items) > 0 {
+		cfg := items[0].(map[string]interface{})
+		autoscaling.SetEnabled(cfg["enabled"].(bool))
+		autoscaling.SetMinReplicas(int32(cfg["min_replicas"].(int)))
+		autoscaling.SetMaxReplicas(int32(cfg["max_replicas"].(int)))
+	}
+	return autoscaling
+}
+
+// VCoresValidatorDiag validates that the given schema value is one of the
+// vcores sizes supported by CloudHub 2.0 (0.1 up to 4). Returns an error
+// diagnostic for any other value.
+func VCoresValidatorDiag(v interface{}, p cty.Path) diag.Diagnostics {
+	value := v.(float64)
+	var diags diag.Diagnostics
+	if !FloatInSlice([]float64{0.1, 0.2, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4}, value) {
+		// previous code named this local "diag", shadowing the diag package
+		d := diag.Diagnostic{
+			Severity: diag.Error,
+			Summary:  "wrong vcores value",
+			Detail:   fmt.Sprintf("%f is not a valid vcores value.", value),
+		}
+		diags = append(diags, d)
+	}
+	return diags
+}
+
+// decomposeCloudhub2SharedSpaceDeploymentId splits a composed resource id
+// into its org id, env id and deployment id parts (in that order).
+func decomposeCloudhub2SharedSpaceDeploymentId(d *schema.ResourceData) (string, string, string) {
+	parts := DecomposeResourceId(d.Id())
+	return parts[0], parts[1], parts[2]
+}
+
+// getCloudhub2SharedSpaceDeploymentUpdatableAttributes lists the resource
+// attributes whose change should trigger a PATCH of the deployment.
+func getCloudhub2SharedSpaceDeploymentUpdatableAttributes() []string {
+	return []string{"application", "target"}
+}
diff --git a/anypoint/resource_dlb.go b/anypoint/resource_dlb.go
index a0b9c81..2b45ffe 100644
--- a/anypoint/resource_dlb.go
+++ b/anypoint/resource_dlb.go
@@ -834,7 +834,7 @@ func equalDLBAllowList(old, new interface{}) bool {
}
// returns true if the DLB key elements have been changed
-func isDLBChanged(ctx context.Context, d *schema.ResourceData, m interface{}) bool {
+func isDLBChanged(_ context.Context, d *schema.ResourceData, _ interface{}) bool {
watchAttrs := getDLBPatchWatchAttributes()
for _, attr := range watchAttrs {
diff --git a/anypoint/resource_fabrics.go b/anypoint/resource_fabrics.go
new file mode 100644
index 0000000..7dccd8c
--- /dev/null
+++ b/anypoint/resource_fabrics.go
@@ -0,0 +1,307 @@
+package anypoint
+
+import (
+ "context"
+ "io"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+ rtf "github.com/mulesoft-anypoint/anypoint-client-go/rtf"
+)
+
+// resourceFabrics defines the schema and CRUD wiring for a Runtime Fabrics
+// instance. Only name, region and vendor are user-settable (all ForceNew);
+// everything else is computed from the platform.
+func resourceFabrics() *schema.Resource {
+	return &schema.Resource{
+		CreateContext: resourceFabricsCreate,
+		ReadContext:   resourceFabricsRead,
+		UpdateContext: resourceFabricsUpdate,
+		DeleteContext: resourceFabricsDelete,
+		Description: `
+		Creates a ` + "`" + `Runtime Fabrics` + "`" + ` instance.
+		`,
+		Schema: map[string]*schema.Schema{
+			"id": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The unique id of this fabrics generated by the anypoint platform.",
+			},
+			"org_id": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "The organization id where the fabrics is defined.",
+			},
+			"name": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "The name of the fabrics",
+			},
+			"region": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+				Description: `
+				The region where fabrics instance is hosted. Refer to the official documentation for the list of available regions.
+				The list of regions is available [here](https://docs.mulesoft.com/cloudhub-2/ch2-architecture#regions-and-dns-records).
+				Examples: us-east-1 / us-east-2
+				`,
+			},
+			"vendor": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+				Description: `
+				The vendor name of the kubernetes instance hosting fabrics. The following values are supported:
+				* eks: AWS Elastic Kubernetes Service
+				* aks: Azure Kubernetes Service
+				* gke: Google Kubernetes Service
+				* ack: Alibaba Kubernetes Service
+				* openshift: Openshift
+				* rancher: Rancher
+				`,
+				ValidateDiagFunc: validation.ToDiagFunc(
+					validation.StringInSlice(
+						[]string{"eks", "aks", "gke", "ack", "openshift", "rancher"},
+						false,
+					),
+				),
+			},
+			"vendor_metadata": {
+				Type:        schema.TypeMap,
+				Computed:    true,
+				Description: "The vendor metadata",
+			},
+			"version": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The version of fabrics.",
+			},
+			"status": {
+				Type:     schema.TypeString,
+				Computed: true,
+				// typo fix: was "farbics"
+				Description: "The status of the fabrics instance.",
+			},
+			"desired_version": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The desired version of fabrics.",
+			},
+			"available_upgrade_version": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The available upgrade version of fabrics.",
+			},
+			"created_at": {
+				Type:        schema.TypeInt,
+				Computed:    true,
+				Description: "The creation date of the fabrics instance",
+			},
+			"upgrade": {
+				Type:        schema.TypeList,
+				Optional:    true,
+				Description: "The status of the fabrics. Only available when instance is created and not activated yet. This cannot be set by user, any value the user puts is ignored.",
+				Elem:        FabricsUpgradeDefinition,
+			},
+			"nodes": {
+				Type:        schema.TypeList,
+				Computed:    true,
+				Elem:        NodeDefinition,
+				Description: "The list of fabrics nodes.",
+			},
+			"activation_data": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The activation data to use during installation of fabrics on the kubernetes cluster. Only available when instance is created and not activated yet.",
+			},
+			"seconds_since_heartbeat": {
+				Type:        schema.TypeInt,
+				Computed:    true,
+				Description: "The number of seconds since last heartbeat.",
+			},
+			"kubernetes_version": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The kubernetes version of the cluster.",
+			},
+			"namespace": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The namespace where runtime fabrics is installed.",
+			},
+			"license_expiry_date": {
+				Type:        schema.TypeInt,
+				Computed:    true,
+				Description: "The expiry date of the license (timestamp).",
+			},
+			"is_managed": {
+				Type:        schema.TypeBool,
+				Computed:    true,
+				Description: "Whether this cluster is managed.",
+			},
+			"is_helm_managed": {
+				Type:     schema.TypeBool,
+				Computed: true,
+				// typo fix: was "managed by helmet"
+				Description: "Whether this cluster is managed by Helm.",
+			},
+			"app_scoped_log_forwarding": {
+				Type:        schema.TypeBool,
+				Computed:    true,
+				Description: "Whether app scoped log forwarding is active.",
+			},
+			"cluster_configuration_level": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The configuration level of the cluster (production or development).",
+			},
+			"features": {
+				Type:        schema.TypeList,
+				Computed:    true,
+				Description: "The features of this cluster.",
+				Elem:        FabricsFeaturesDefinition,
+			},
+			"ingress": {
+				Type:        schema.TypeList,
+				Computed:    true,
+				Description: "The ingress configurations of this cluster.",
+				Elem:        FabricsIngressDomainsDefinition,
+			},
+		},
+		Importer: &schema.ResourceImporter{
+			StateContext: schema.ImportStatePassthroughContext,
+		},
+	}
+}
+
+// resourceFabricsCreate registers a new Runtime Fabrics instance with the
+// platform, stores its id, and reads the instance back into state.
+func resourceFabricsCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	var diags diag.Diagnostics
+	pco := m.(ProviderConfOutput)
+	name := d.Get("name").(string)
+	orgid := d.Get("org_id").(string)
+	authctx := getFabricsAuthCtx(ctx, &pco)
+	body := prepareFabricsPostBody(d)
+	//perform request
+	res, httpr, err := pco.rtfclient.DefaultApi.PostFabrics(authctx, orgid).FabricsPostBody(*body).Execute()
+	if err != nil {
+		// default to the client error, override with the API payload when present
+		details := err.Error()
+		if httpr != nil && httpr.StatusCode >= 400 {
+			defer httpr.Body.Close()
+			payload, _ := io.ReadAll(httpr.Body)
+			details = string(payload)
+		}
+		diags = append(diags, diag.Diagnostic{
+			Severity: diag.Error,
+			Summary:  "Unable to create fabrics " + name,
+			Detail:   details,
+		})
+		return diags
+	}
+	defer httpr.Body.Close()
+	d.SetId(res.GetId())
+	return resourceFabricsRead(ctx, d, m)
+}
+
+// resourceFabricsRead fetches the fabrics instance from the platform and maps
+// it into terraform state. Supports composed ids ({org_id}/{fabrics_id}) so
+// the resource can be imported.
+func resourceFabricsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	var diags diag.Diagnostics
+	pco := m.(ProviderConfOutput)
+	fabricsid := d.Id()
+	orgid := d.Get("org_id").(string)
+	authctx := getFabricsAuthCtx(ctx, &pco)
+	if isComposedResourceId(fabricsid) {
+		// import case: org and fabrics id are encoded in the resource id
+		orgid, fabricsid = decomposeFabricsId(d)
+	}
+	//perform request
+	res, httpr, err := pco.rtfclient.DefaultApi.GetFabrics(authctx, orgid, fabricsid).Execute()
+	if err != nil {
+		var details string
+		if httpr != nil && httpr.StatusCode >= 400 {
+			defer httpr.Body.Close()
+			b, _ := io.ReadAll(httpr.Body)
+			details = string(b)
+		} else {
+			details = err.Error()
+		}
+		// plain assignment: ":=" here would shadow the outer diags slice
+		diags = append(diags, diag.Diagnostic{
+			Severity: diag.Error,
+			Summary:  "Unable to read fabrics " + fabricsid,
+			Detail:   details,
+		})
+		return diags
+	}
+	defer httpr.Body.Close()
+	//process data
+	data := flattenFabricsData(res)
+	//save in data source schema
+	if err := setFabricsResourceData(d, data); err != nil {
+		diags = append(diags, diag.Diagnostic{
+			Severity: diag.Error,
+			Summary:  "Unable to set fabrics " + fabricsid,
+			Detail:   err.Error(),
+		})
+		return diags
+	}
+	d.SetId(fabricsid)
+	d.Set("org_id", orgid)
+	return diags
+}
+
+// resourceFabricsUpdate only refreshes the state from the API; no PATCH is
+// issued. All user-settable attributes (name, region, vendor) are ForceNew.
+// NOTE(review): the "upgrade" attribute is declared Optional (not ForceNew)
+// yet is not sent anywhere here — confirm whether that is intentional.
+func resourceFabricsUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	return resourceFabricsRead(ctx, d, m)
+}
+
+// resourceFabricsDelete removes the fabrics instance from the platform and
+// clears the resource id.
+func resourceFabricsDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	var diags diag.Diagnostics
+	pco := m.(ProviderConfOutput)
+	fabricsid := d.Id()
+	orgid := d.Get("org_id").(string)
+	authctx := getFabricsAuthCtx(ctx, &pco)
+	//perform request
+	httpr, err := pco.rtfclient.DefaultApi.DeleteFabrics(authctx, orgid, fabricsid).Execute()
+	if err != nil {
+		var details string
+		if httpr != nil && httpr.StatusCode >= 400 {
+			defer httpr.Body.Close()
+			b, _ := io.ReadAll(httpr.Body)
+			details = string(b)
+		} else {
+			details = err.Error()
+		}
+		// plain assignment: ":=" here would shadow the outer diags slice
+		diags = append(diags, diag.Diagnostic{
+			Severity: diag.Error,
+			Summary:  "Unable to delete fabrics " + fabricsid,
+			Detail:   details,
+		})
+		return diags
+	}
+	defer httpr.Body.Close()
+	// d.SetId("") is automatically called assuming delete returns no errors, but
+	// it is added here for explicitness.
+	d.SetId("")
+
+	return diags
+}
+
+// prepareFabricsPostBody assembles the fabrics creation request from the
+// resource arguments: name, vendor and region.
+func prepareFabricsPostBody(d *schema.ResourceData) *rtf.FabricsPostBody {
+	post := rtf.NewFabricsPostBody()
+	post.SetRegion(d.Get("region").(string))
+	post.SetVendor(d.Get("vendor").(string))
+	post.SetName(d.Get("name").(string))
+	return post
+}
+
+// decomposeFabricsId splits a composed resource id into its org id and
+// fabrics id parts (in that order).
+func decomposeFabricsId(d *schema.ResourceData) (string, string) {
+	parts := DecomposeResourceId(d.Id())
+	return parts[0], parts[1]
+}
+
+// getFabricsAuthCtx returns a context carrying the access token and server
+// index required by the RTF API client (used as the authorization header).
+func getFabricsAuthCtx(ctx context.Context, pco *ProviderConfOutput) context.Context {
+	withToken := context.WithValue(ctx, rtf.ContextAccessToken, pco.access_token)
+	return context.WithValue(withToken, rtf.ContextServerIndex, pco.server_index)
+}
diff --git a/anypoint/resource_fabrics_associations.go b/anypoint/resource_fabrics_associations.go
new file mode 100644
index 0000000..b197972
--- /dev/null
+++ b/anypoint/resource_fabrics_associations.go
@@ -0,0 +1,253 @@
+package anypoint
+
+import (
+ "context"
+ "io"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ rtf "github.com/mulesoft-anypoint/anypoint-client-go/rtf"
+)
+
+// resourceFabricsAssociations defines the schema and CRUD wiring for the
+// environment associations of a Runtime Fabrics instance. There is no update:
+// every attribute is ForceNew, and delete re-associates the fabrics with all
+// sandbox environments (see resourceFabricsAssociationsDelete).
+func resourceFabricsAssociations() *schema.Resource {
+	return &schema.Resource{
+		CreateContext: resourceFabricsAssociationsCreate,
+		ReadContext:   resourceFabricsAssociationsRead,
+		DeleteContext: resourceFabricsAssociationsDelete,
+		Description: `
+		Manages ` + "`" + `Runtime Fabrics` + "`" + ` Environment associations.
+		NOTE: The fabrics will be associated with all sandbox environments in every available org when this resource is deleted.
+		`,
+		Schema: map[string]*schema.Schema{
+			"last_updated": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Computed:    true,
+				Description: "The last time this resource has been updated locally.",
+			},
+			"id": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The unique id of this fabrics generated by the anypoint platform.",
+			},
+			"org_id": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "The organization id where the fabrics is hosted.",
+			},
+			"fabrics_id": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "The unique id of the fabrics instance in the platform.",
+			},
+			"associations": {
+				Type:        schema.TypeSet,
+				Required:    true,
+				ForceNew:    true,
+				MinItems:    1,
+				Description: "The list of environment associations to an instance of fabrics",
+				// suppress diffs when old and new sets are equal regardless of order
+				DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool {
+					return equalFabricsAssociations(d.GetChange("associations"))
+				},
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"id": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The unique id of the fabrics instance in the platform.",
+						},
+						"org_id": {
+							Type:        schema.TypeString,
+							Required:    true,
+							ForceNew:    true,
+							Description: "The organization id to associate with fabrics.",
+						},
+						"env_id": {
+							Type:        schema.TypeString,
+							Required:    true,
+							ForceNew:    true,
+							Description: "The environment to associate with fabrics.",
+						},
+					},
+				},
+			},
+		},
+		Importer: &schema.ResourceImporter{
+			StateContext: schema.ImportStatePassthroughContext,
+		},
+	}
+}
+
+// resourceFabricsAssociationsCreate posts the environment associations for a
+// fabrics instance, composes the resource id as {org_id}/{fabrics_id}, and
+// reads the associations back into state.
+func resourceFabricsAssociationsCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	var diags diag.Diagnostics
+	pco := m.(ProviderConfOutput)
+	fabricsid := d.Get("fabrics_id").(string)
+	orgid := d.Get("org_id").(string)
+	authctx := getFabricsAuthCtx(ctx, &pco)
+	body := prepareFabricsAssociationsPostBody(d)
+	//perform request
+	_, httpr, err := pco.rtfclient.DefaultApi.PostFabricsAssociations(authctx, orgid, fabricsid).FabricsAssociationsPostBody(*body).Execute()
+	if err != nil {
+		// default to the client error, override with the API payload when present
+		details := err.Error()
+		if httpr != nil && httpr.StatusCode >= 400 {
+			defer httpr.Body.Close()
+			payload, _ := io.ReadAll(httpr.Body)
+			details = string(payload)
+		}
+		diags = append(diags, diag.Diagnostic{
+			Severity: diag.Error,
+			Summary:  "Unable to create fabrics " + fabricsid + " associations ",
+			Detail:   details,
+		})
+		return diags
+	}
+	defer httpr.Body.Close()
+	d.SetId(ComposeResourceId([]string{orgid, fabricsid}))
+	return resourceFabricsAssociationsRead(ctx, d, m)
+}
+
+// resourceFabricsAssociationsRead fetches the environment associations of a
+// fabrics instance and maps them into state. Supports composed ids
+// ({org_id}/{fabrics_id}) so the resource can be imported.
+func resourceFabricsAssociationsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	var diags diag.Diagnostics
+	pco := m.(ProviderConfOutput)
+	fabricsid := d.Get("fabrics_id").(string)
+	orgid := d.Get("org_id").(string)
+	authctx := getFabricsAuthCtx(ctx, &pco)
+	if isComposedResourceId(d.Id()) {
+		// import case: org and fabrics id are encoded in the resource id
+		orgid, fabricsid = decomposeFabricsAssociationsId(d)
+	}
+	//perform request
+	res, httpr, err := pco.rtfclient.DefaultApi.GetFabricsAssociations(authctx, orgid, fabricsid).Execute()
+	if err != nil {
+		var details string
+		if httpr != nil && httpr.StatusCode >= 400 {
+			defer httpr.Body.Close()
+			b, _ := io.ReadAll(httpr.Body)
+			details = string(b)
+		} else {
+			details = err.Error()
+		}
+		// plain assignment: ":=" here would shadow the outer diags slice
+		diags = append(diags, diag.Diagnostic{
+			Severity: diag.Error,
+			Summary:  "Unable to read fabrics " + fabricsid + " associations",
+			Detail:   details,
+		})
+		return diags
+	}
+	defer httpr.Body.Close()
+	//process data
+	list := flattenFabricsAssociationsData(res)
+	//save in data source schema
+	if err := d.Set("associations", list); err != nil {
+		diags = append(diags, diag.Diagnostic{
+			Severity: diag.Error,
+			Summary:  "Unable to set fabrics " + fabricsid + " associations",
+			Detail:   err.Error(),
+		})
+		return diags
+	}
+	// re-set id and components so that import produces a complete state
+	d.SetId(ComposeResourceId([]string{orgid, fabricsid}))
+	d.Set("org_id", orgid)
+	d.Set("fabrics_id", fabricsid)
+	return diags
+}
+
+// resourceFabricsAssociationsDelete "deletes" the managed associations by
+// re-associating the fabrics with all sandbox environments (the platform has
+// no true delete for associations), then clears the resource id.
+func resourceFabricsAssociationsDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	var diags diag.Diagnostics
+	pco := m.(ProviderConfOutput)
+	fabricsid := d.Get("fabrics_id").(string)
+	orgid := d.Get("org_id").(string)
+	authctx := getFabricsAuthCtx(ctx, &pco)
+	body := prepareFabricsAssociationsDeleteBody(d)
+	//perform request
+	_, httpr, err := pco.rtfclient.DefaultApi.PostFabricsAssociations(authctx, orgid, fabricsid).FabricsAssociationsPostBody(*body).Execute()
+	if err != nil {
+		var details string
+		if httpr != nil && httpr.StatusCode >= 400 {
+			defer httpr.Body.Close()
+			b, _ := io.ReadAll(httpr.Body)
+			details = string(b)
+		} else {
+			details = err.Error()
+		}
+		// plain assignment: ":=" here would shadow the outer diags slice
+		diags = append(diags, diag.Diagnostic{
+			Severity: diag.Error,
+			Summary:  "Unable to delete fabrics " + fabricsid,
+			Detail:   details,
+		})
+		return diags
+	}
+	defer httpr.Body.Close()
+	// d.SetId("") is automatically called assuming delete returns no errors, but
+	// it is added here for explicitness.
+	d.SetId("")
+
+	return diags
+}
+
+// prepareFabricsAssociationsPostBody builds the associations POST body from
+// the "associations" set in the resource data.
+//
+// NOTE: the previous implementation returned nil for an empty set, which would
+// make the caller panic when dereferencing *body. The schema enforces
+// MinItems=1 so that path was unreachable, but returning an empty body is
+// strictly safer and behaviorally identical for all valid configurations.
+func prepareFabricsAssociationsPostBody(d *schema.ResourceData) *rtf.FabricsAssociationsPostBody {
+	body := rtf.NewFabricsAssociationsPostBody()
+	associations := d.Get("associations").(*schema.Set)
+	res := make([]rtf.FabricsAssociationsPostBodyAssociationsInner, associations.Len())
+	for i, association := range associations.List() {
+		parsedAssoc := association.(map[string]interface{})
+		inner := rtf.NewFabricsAssociationsPostBodyAssociationsInner()
+		inner.SetOrganizationId(parsedAssoc["org_id"].(string))
+		inner.SetEnvironment(parsedAssoc["env_id"].(string))
+		res[i] = *inner
+	}
+	body.SetAssociations(res)
+	return body
+}
+
+// prepareFabricsAssociationsDeleteBody builds the body used on delete: the
+// fabrics is re-associated with the sandbox environment of all organizations,
+// which is the platform's stand-in for "no managed associations".
+func prepareFabricsAssociationsDeleteBody(_ *schema.ResourceData) *rtf.FabricsAssociationsPostBody {
+	env := "sandbox"
+	org := "all"
+	body := rtf.NewFabricsAssociationsPostBody()
+	body.SetAssociations([]rtf.FabricsAssociationsPostBodyAssociationsInner{
+		{
+			Environment:    &env,
+			OrganizationId: &org,
+		},
+	})
+	return body
+}
+
+// decomposeFabricsAssociationsId splits a composed resource id into its
+// org id and fabrics id parts (in that order).
+func decomposeFabricsAssociationsId(d *schema.ResourceData) (string, string) {
+	parts := DecomposeResourceId(d.Id())
+	return parts[0], parts[1]
+}
+
+// equalFabricsAssociations reports whether two association sets contain the
+// same (org_id, env_id) pairs, regardless of element order. Used as the
+// DiffSuppressFunc for the "associations" attribute.
+func equalFabricsAssociations(old, new interface{}) bool {
+	old_list := old.(*schema.Set).List()
+	new_list := new.(*schema.Set).List()
+	// cheap length check first: no point sorting when the sizes already differ
+	if len(new_list) != len(old_list) {
+		return false
+	}
+	//sort both lists so elements can be compared pairwise
+	sortAttr := []string{"org_id", "env_id"}
+	SortMapListAl(new_list, sortAttr)
+	SortMapListAl(old_list, sortAttr)
+	for i, val := range old_list {
+		o := val.(map[string]interface{})
+		n := new_list[i].(map[string]interface{})
+		if n["org_id"].(string) != o["org_id"].(string) || n["env_id"].(string) != o["env_id"].(string) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/anypoint/resource_rolegroup_roles.go b/anypoint/resource_rolegroup_roles.go
index bb23873..0061250 100644
--- a/anypoint/resource_rolegroup_roles.go
+++ b/anypoint/resource_rolegroup_roles.go
@@ -235,7 +235,7 @@ func resourceRoleGroupRolesDelete(ctx context.Context, d *schema.ResourceData, m
/**
* Generates body object for creating rolegroup roles
*/
-func newRolegroupRolesPostBody(org_id string, rolegroup_id string, d *schema.ResourceData) ([]map[string]interface{}, diag.Diagnostics) {
+func newRolegroupRolesPostBody(org_id string, _ string, d *schema.ResourceData) ([]map[string]interface{}, diag.Diagnostics) {
var diags diag.Diagnostics
roles := d.Get("roles").([]interface{})
diff --git a/anypoint/resource_rtf_deployment.go b/anypoint/resource_rtf_deployment.go
new file mode 100644
index 0000000..4c1cd37
--- /dev/null
+++ b/anypoint/resource_rtf_deployment.go
@@ -0,0 +1,932 @@
+package anypoint
+
+import (
+ "context"
+ "io"
+ "regexp"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+ application_manager_v2 "github.com/mulesoft-anypoint/anypoint-client-go/application_manager_v2"
+)
+
+// DeplApplicationConfigLoggingRTFDefinition describes the logging section of a
+// Runtime Fabrics mule app configuration: a computed artifact name plus the
+// optional list of per-scope log level overrides.
+var DeplApplicationConfigLoggingRTFDefinition = &schema.Resource{
+	Schema: map[string]*schema.Schema{
+		"artifact_name": {
+			Type:        schema.TypeString,
+			Description: "The application name.",
+			Computed:    true,
+		},
+		"scope_logging_configurations": {
+			Type:        schema.TypeList,
+			Description: "Additional log levels and categories to include in logs.",
+			Optional:    true,
+			Elem: &schema.Resource{
+				Schema: map[string]*schema.Schema{
+					"scope": {
+						Type:        schema.TypeString,
+						Description: "The logging package scope",
+						Required:    true,
+					},
+					"log_level": {
+						Type:        schema.TypeString,
+						Description: "The application log level: INFO / DEBUG / WARNING / ERROR / FATAL",
+						Required:    true,
+						ValidateDiagFunc: validation.ToDiagFunc(
+							validation.StringInSlice([]string{"INFO", "DEBUG", "WARNING", "ERROR", "FATAL"}, false),
+						),
+					},
+				},
+			},
+		},
+	},
+}
+
+// DeplApplicationConfigPropsRTFDefinition describes the application properties
+// section of a Runtime Fabrics mule app configuration: plain and secured
+// key/value properties, both defaulting to empty maps.
+var DeplApplicationConfigPropsRTFDefinition = &schema.Resource{
+	Schema: map[string]*schema.Schema{
+		"application_name": {
+			Type:        schema.TypeString,
+			Description: "The application name",
+			Computed:    true,
+		},
+		"properties": {
+			Type:        schema.TypeMap,
+			Description: "The mule application properties.",
+			Optional:    true,
+			DefaultFunc: func() (interface{}, error) { return make(map[string]string), nil },
+		},
+		"secure_properties": {
+			Type:        schema.TypeMap,
+			Description: "The mule application secured properties.",
+			Optional:    true,
+			DefaultFunc: func() (interface{}, error) { return make(map[string]string), nil },
+		},
+	},
+}
+
+// DeplApplicationConfigRTFDefinition is the top-level "configuration" block of
+// an RTF deployment: required app properties service, optional logging service
+// and a computed (read-only) scheduling service.
+var DeplApplicationConfigRTFDefinition = &schema.Resource{
+	Schema: map[string]*schema.Schema{
+		"mule_agent_app_props_service": {
+			Type:        schema.TypeList,
+			MaxItems:    1,
+			Description: "The mule app properties",
+			Elem:        DeplApplicationConfigPropsRTFDefinition,
+			Required:    true,
+		},
+		"mule_agent_logging_service": {
+			Type:        schema.TypeList,
+			MaxItems:    1,
+			Description: "The mule app logging props",
+			Elem:        DeplApplicationConfigLoggingRTFDefinition,
+			Optional:    true,
+		},
+		"mule_agent_scheduling_service": {
+			Type:        schema.TypeList,
+			Description: "The mule app scheduling",
+			Elem:        DeplApplicationConfigSchedulingReadOnlyDefinition,
+			Computed:    true,
+		},
+	},
+}
+
+// DeplApplicationRefRTFDefinition identifies the Exchange artifact to deploy:
+// Maven-style coordinates (group/artifact/version) plus the packaging type,
+// which is restricted to "jar".
+var DeplApplicationRefRTFDefinition = &schema.Resource{
+	Schema: map[string]*schema.Schema{
+		"group_id": {
+			Type:        schema.TypeString,
+			Required:    true,
+			Description: "The groupId of the application.",
+		},
+		"artifact_id": {
+			Type:        schema.TypeString,
+			Required:    true,
+			Description: "The artifactId of the application.",
+		},
+		"version": {
+			Type:        schema.TypeString,
+			Required:    true,
+			Description: "The version of the application.",
+		},
+		"packaging": {
+			Type:     schema.TypeString,
+			Required: true,
+			ValidateDiagFunc: validation.ToDiagFunc(
+				validation.StringInSlice([]string{"jar"}, false),
+			),
+			Description: "The packaging of the application. Only 'jar' is supported.",
+		},
+	},
+}
+
+// DeplApplicationRTFDefinition is the "application" block of an RTF
+// deployment: desired state, the Exchange artifact reference, the app
+// configuration, and computed runtime facts (status, vcores, object store v2).
+var DeplApplicationRTFDefinition = &schema.Resource{
+	Schema: map[string]*schema.Schema{
+		"status": {
+			Type:        schema.TypeString,
+			Computed:    true,
+			Description: "The status of the application.",
+		},
+		"desired_state": {
+			Type:        schema.TypeString,
+			Optional:    true,
+			Default:     "STARTED",
+			Description: "The desired state of the application.",
+			ValidateDiagFunc: validation.ToDiagFunc(
+				validation.StringInSlice(
+					[]string{
+						"PARTIALLY_STARTED", "DEPLOYMENT_FAILED", "STARTING", "STARTED", "STOPPING",
+						"STOPPED", "UNDEPLOYING", "UNDEPLOYED", "UPDATED", "APPLIED", "APPLYING", "FAILED", "DELETED",
+					},
+					false,
+				),
+			),
+		},
+		"ref": {
+			Type:     schema.TypeList,
+			MaxItems: 1,
+			Required: true,
+			Description: `
+			The reference to the artifact on Exchange that is to be deployed on Runtime Fabrics.
+			Please ensure the application's artifact is deployed on Exchange before using this resource on Runtime Fabrics.
+			`,
+			Elem: DeplApplicationRefRTFDefinition,
+		},
+		"configuration": {
+			Type:        schema.TypeList,
+			MaxItems:    1,
+			Required:    true,
+			Description: "The configuration of the application.",
+			Elem:        DeplApplicationConfigRTFDefinition,
+		},
+		"vcores": {
+			Type:        schema.TypeFloat,
+			Description: "The allocated virtual cores.",
+			Computed:    true,
+		},
+		"object_store_v2_enabled": {
+			Type:        schema.TypeBool,
+			Computed:    true,
+			Description: "Whether object store v2 is enabled. Only for Cloudhub.",
+		},
+	},
+}
+
+// DeplTargetDeplSettHttpRTFDefinition describes the HTTP inbound settings of
+// an RTF deployment target: ingress url(s), last-mile security, ssl session
+// forwarding, and several computed ingress attributes.
+var DeplTargetDeplSettHttpRTFDefinition = &schema.Resource{
+	Schema: map[string]*schema.Schema{
+		"inbound_public_url": {
+			Type: schema.TypeString,
+			Description: `The ingress url(s).
+			If you need to use multiple ingress urls, separate them with commas.
+			example: http://example.mulesoft.terraform.net/(.+)
+			`,
+			Optional: true,
+			Default:  "",
+		},
+		"inbound_path_rewrite": {
+			Type:        schema.TypeString,
+			Description: "The inbound path rewrite. This option is only available for Cloudhub 2.0 with private spaces",
+			Computed:    true,
+		},
+		"inbound_last_mile_security": {
+			Type:        schema.TypeBool,
+			Description: "Last-mile security means that the connection between ingress and the actual Mule app will be HTTPS.",
+			Optional:    true,
+			Default:     false,
+		},
+		"inbound_forward_ssl_session": {
+			Type:        schema.TypeBool,
+			Description: "Whether to forward the ssl session.",
+			Optional:    true,
+			Default:     false,
+		},
+		"inbound_internal_url": {
+			Type:        schema.TypeString,
+			Description: "The inbound internal url.",
+			Computed:    true,
+		},
+		"inbound_unique_id": {
+			Type:        schema.TypeString,
+			Description: "The inbound unique id.",
+			Computed:    true,
+		},
+	},
+}
+
+// DeplTargetDeplSettRuntimeRTFDefinition describes the Mule runtime selection
+// for an RTF deployment: required version, plus release channel and java
+// version with defaults ("EDGE", "8").
+var DeplTargetDeplSettRuntimeRTFDefinition = &schema.Resource{
+	Schema: map[string]*schema.Schema{
+		"version": {
+			Type: schema.TypeString,
+			Description: `
+			On deployment operations it can be set to:
+			- a full image version with tag (i.e "4.6.0:40e-java17"),
+			- a base version with a partial tag not indicating the java version (i.e. "4.6.0:40")
+			- or only a base version (i.e. "4.6.0").
+			Defaults to the latest image version.
+			This field has precedence over the legacy 'target.deploymentSettings.runtimeVersion'.
+			Learn more about Mule runtime release notes [here](https://docs.mulesoft.com/release-notes/runtime-fabric/runtime-fabric-runtimes-release-notes)
+			`,
+			Required: true,
+		},
+		"release_channel": {
+			Type: schema.TypeString,
+			Description: `
+			On deployment operations it can be set to one of:
+			- "LTS"
+			- "EDGE"
+			- "LEGACY".
+			Defaults to "EDGE". This field has precedence over the legacy 'target.deploymentSettings.runtimeReleaseChannel'.
+			Learn more on release channels [here](https://docs.mulesoft.com/release-notes/mule-runtime/lts-edge-release-cadence).
+			`,
+			Optional: true,
+			Default:  "EDGE",
+			ValidateDiagFunc: validation.ToDiagFunc(
+				validation.StringInSlice([]string{"LTS", "EDGE", "LEGACY"}, false),
+			),
+		},
+		"java": {
+			Type: schema.TypeString,
+			Description: `
+			On deployment operations it can be set to one of:
+			- "8"
+			- "17"
+			Defaults to "8".
+			Learn more about Java support [here](https://docs.mulesoft.com/general/java-support).
+			`,
+			Optional: true,
+			Default:  "8",
+			ValidateDiagFunc: validation.ToDiagFunc(
+				validation.StringInSlice([]string{"8", "17"}, false),
+			),
+		},
+	},
+}
+
+// DeplTargetDeplSettResourcesRTFDefinition describes the compute resources of
+// an RTF deployment. CPU values use the millicore form (e.g. "100m"), memory
+// values the mebibyte form (e.g. "1000Mi"); storage fields are read-only.
+var DeplTargetDeplSettResourcesRTFDefinition = &schema.Resource{
+	Schema: map[string]*schema.Schema{
+		"cpu_limit": {
+			Type:        schema.TypeString,
+			Description: "The CPU limit",
+			Required:    true,
+			ValidateDiagFunc: validation.ToDiagFunc(
+				validation.StringMatch(
+					regexp.MustCompile(`^\d+m$`),
+					"field value should be a valid cpu representation. ex: 100m (= 0.1 vcores).",
+				),
+			),
+		},
+		"cpu_reserved": {
+			Type:        schema.TypeString,
+			Description: "The CPU reserved.",
+			Required:    true,
+			ValidateDiagFunc: validation.ToDiagFunc(
+				validation.StringMatch(
+					regexp.MustCompile(`^\d+m$`),
+					"field value should be a valid cpu representation. ex: 100m (= 0.1 vcores).",
+				),
+			),
+		},
+		"memory_limit": {
+			Type:        schema.TypeString,
+			Description: "The memory limit",
+			Required:    true,
+			ValidateDiagFunc: validation.ToDiagFunc(
+				validation.StringMatch(
+					regexp.MustCompile(`^\d+Mi$`),
+					"field value should be a valid memory representation. ex: 1000Mi (= 1Gb).",
+				),
+			),
+		},
+		"memory_reserved": {
+			Type:        schema.TypeString,
+			Description: "The memory reserved.",
+			Required:    true,
+			ValidateDiagFunc: validation.ToDiagFunc(
+				validation.StringMatch(
+					regexp.MustCompile(`^\d+Mi$`),
+					"field value should be a valid memory representation. ex: 1000Mi (= 1Gb).",
+				),
+			),
+		},
+		"storage_limit": {
+			Type:        schema.TypeString,
+			Description: "The storage limit",
+			Computed:    true,
+		},
+		"storage_reserved": {
+			Type:        schema.TypeString,
+			Description: "The storage reserved",
+			Computed:    true,
+		},
+	},
+}
+
+// DeplTargetDeplSettAutoscalingRTFDefinition describes CPU-based horizontal
+// autoscaling: an on/off switch plus bounded min (1-3) and max (2-32) replica
+// counts.
+var DeplTargetDeplSettAutoscalingRTFDefinition = &schema.Resource{
+	Schema: map[string]*schema.Schema{
+		"enabled": {
+			Type:        schema.TypeBool,
+			Description: "Enables or disables the Autoscaling feature. The possible values are: true or false.",
+			Required:    true,
+		},
+		"min_replicas": {
+			Type:             schema.TypeInt,
+			Description:      "Set the minimum amount of replicas for your deployment. The minimum accepted value is 1. The maximum is 3.",
+			Optional:         true,
+			Default:          1,
+			ValidateDiagFunc: validation.ToDiagFunc(validation.IntBetween(1, 3)),
+		},
+		"max_replicas": {
+			Type:             schema.TypeInt,
+			Description:      "Set the maximum amount of replicas your application can scale to. The minimum accepted value is 2. The maximum is 32.",
+			Optional:         true,
+			Default:          2,
+			ValidateDiagFunc: validation.ToDiagFunc(validation.IntBetween(2, 32)),
+		},
+	},
+}
+
+// DeplTargetDeploymentSettingsRTFDefinition groups every deployment-time
+// setting of an RTF target: clustering, HTTP inbound, JVM args, runtime
+// selection, autoscaling, update strategy, resources and log forwarding flags.
+var DeplTargetDeploymentSettingsRTFDefinition = &schema.Resource{
+	Schema: map[string]*schema.Schema{
+		"clustered": {
+			Type:        schema.TypeBool,
+			Description: "Whether the application is deployed in clustered mode.",
+			Optional:    true,
+			Default:     false,
+		},
+		"enforce_deploying_replicas_across_nodes": {
+			Type:        schema.TypeBool,
+			Description: "If true, forces the deployment of replicas across the RTF cluster. This option only available for Runtime Fabrics.",
+			Optional:    true,
+			Default:     false,
+		},
+		"http": {
+			Type:        schema.TypeList,
+			Description: "The details about http inbound or outbound configuration",
+			Optional:    true,
+			MaxItems:    1,
+			// Default mirrors the sub-schema's own defaults so plans stay stable
+			// when the block is omitted.
+			DefaultFunc: func() (interface{}, error) {
+				dict := make(map[string]interface{})
+				dict["inbound_last_mile_security"] = false
+				dict["inbound_forward_ssl_session"] = false
+				return []interface{}{dict}, nil
+			},
+			Elem: DeplTargetDeplSettHttpRTFDefinition,
+		},
+		"jvm_args": {
+			Type:        schema.TypeString,
+			Description: "The java virtual machine arguments",
+			Optional:    true,
+			Default:     "",
+		},
+		"runtime": {
+			Type:        schema.TypeList,
+			Description: "The Mule app runtime version info.",
+			Optional:    true,
+			MaxItems:    1,
+			Elem:        DeplTargetDeplSettRuntimeRTFDefinition,
+		},
+		"autoscaling": {
+			Type: schema.TypeList,
+			Description: `
+			Use this object to provide CPU Based Horizontal Autoscaling configuration on deployment and redeployment operations. This object is optional.
+			If Autoscaling is disabled and the fields "minReplicas" and "maxReplicas" are provided, they must match the value of "target.replicas" field.
+			Learn more about Autoscaling [here](https://docs.mulesoft.com/cloudhub-2/ch2-configure-horizontal-autoscaling).
+			`,
+			Optional: true,
+			MaxItems: 1,
+			DefaultFunc: func() (interface{}, error) {
+				dict := make(map[string]interface{})
+				dict["enabled"] = false
+				return []interface{}{dict}, nil
+			},
+			Elem: DeplTargetDeplSettAutoscalingRTFDefinition,
+		},
+		"update_strategy": {
+			Type:        schema.TypeString,
+			Description: "The mule app deployment update strategy: rolling or recreate",
+			Optional:    true,
+			Default:     "rolling",
+			ValidateDiagFunc: validation.ToDiagFunc(
+				validation.StringInSlice([]string{"rolling", "recreate"}, false),
+			),
+		},
+		"resources": {
+			Type:        schema.TypeList,
+			MaxItems:    1,
+			Description: "The mule app allocated resources.",
+			Elem:        DeplTargetDeplSettResourcesRTFDefinition,
+			Required:    true,
+		},
+		"disable_am_log_forwarding": {
+			Type:        schema.TypeBool,
+			Description: "Whether Anypoint Monitoring log forwarding is disabled.",
+			Optional:    true,
+			Default:     false,
+		},
+		"persistent_object_store": {
+			Type:        schema.TypeBool,
+			Description: "Whether persistent object store is enabled.",
+			Optional:    true,
+			Default:     false,
+		},
+		"anypoint_monitoring_scope": {
+			Type:        schema.TypeString,
+			Description: "The anypoint monitoring scope",
+			Computed:    true,
+		},
+		"sidecars": {
+			Type:        schema.TypeList,
+			Description: "The mule app sidecars.",
+			Elem:        DeplTargetDeplSettSidecarsReadOnlyDefinition,
+			Computed:    true,
+		},
+		"disable_external_log_forwarding": {
+			Type:        schema.TypeBool,
+			Description: "Whether external log forwarding is disabled.",
+			Optional:    true,
+			Default:     false,
+		},
+		"tracing_enabled": {
+			Type:        schema.TypeBool,
+			Description: "Whether the log tracing is enabled.",
+			Computed:    true,
+		},
+		"generate_default_public_url": {
+			Type:        schema.TypeBool,
+			Description: "Whether default public url should be generated.",
+			Optional:    true,
+			Default:     false,
+		},
+	},
+}
+
+// DeplTargetRTFDefinition is the "target" block of an RTF deployment: cloud
+// provider (only "MC"), the Runtime Fabrics target id, deployment settings and
+// replica count. Provider and target id force resource re-creation on change.
+var DeplTargetRTFDefinition = &schema.Resource{
+	Schema: map[string]*schema.Schema{
+		"provider": {
+			Type:        schema.TypeString,
+			Description: "The cloud provider the target belongs to.",
+			Optional:    true,
+			Default:     "MC",
+			ForceNew:    true,
+			ValidateDiagFunc: validation.ToDiagFunc(
+				validation.StringInSlice([]string{"MC"}, false),
+			),
+		},
+		"target_id": {
+			Type:        schema.TypeString,
+			Description: "The unique identifier of the Runtime Fabrics target.",
+			Required:    true,
+			ForceNew:    true,
+		},
+		"deployment_settings": {
+			Type:        schema.TypeList,
+			MaxItems:    1,
+			Description: "The settings of the target for the deployment to perform.",
+			Required:    true,
+			Elem:        DeplTargetDeploymentSettingsRTFDefinition,
+		},
+		"replicas": {
+			Type:        schema.TypeInt,
+			Description: "The number of replicas. Default is 1.",
+			Optional:    true,
+			Default:     1,
+		},
+	},
+}
+
+// resourceRTFDeployment returns the schema and CRUD wiring for the mule app
+// deployment resource targeting Runtime Fabrics.
+func resourceRTFDeployment() *schema.Resource {
+	return &schema.Resource{
+		CreateContext: resourceRTFDeploymentCreate,
+		ReadContext:   resourceRTFDeploymentRead,
+		UpdateContext: resourceRTFDeploymentUpdate,
+		DeleteContext: resourceRTFDeploymentDelete,
+		Description: `
+		Creates and manages a ` + "`" + `deployment` + "`" + ` of a mule app on Runtime Fabrics only.
+		`,
+		Schema: map[string]*schema.Schema{
+			"id": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The unique id of the mule app deployment in the platform.",
+			},
+			"org_id": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "The organization where the mule app is deployed.",
+			},
+			"env_id": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "The environment where mule app is deployed.",
+			},
+			"name": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "The name of the deployed mule app.",
+			},
+			"creation_date": {
+				Type:        schema.TypeInt,
+				Computed:    true,
+				Description: "The creation date of the mule app.",
+			},
+			"last_modified_date": {
+				Type:        schema.TypeInt,
+				Computed:    true,
+				Description: "The last modification date of the mule app.",
+			},
+			"desired_version": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The deployment desired version of the mule app.",
+			},
+			"replicas": {
+				Type:        schema.TypeList,
+				Computed:    true,
+				Description: "Data of the mule app replicas",
+				Elem:        ReplicasReadOnlyDefinition,
+			},
+			"status": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "The status of the mule app deployment.",
+			},
+			"application": {
+				Type:        schema.TypeList,
+				MaxItems:    1,
+				Required:    true,
+				Description: "The details of the application to deploy",
+				Elem:        DeplApplicationRTFDefinition,
+			},
+			"target": {
+				Type:        schema.TypeList,
+				MaxItems:    1,
+				Required:    true,
+				Description: "The details of the target to perform the deployment on.",
+				Elem:        DeplTargetRTFDefinition,
+			},
+			"last_successful_version": {
+				Type:        schema.TypeString,
+				Description: "The last successfully deployed version",
+				Computed:    true,
+			},
+		},
+		Importer: &schema.ResourceImporter{
+			StateContext: schema.ImportStatePassthroughContext,
+		},
+	}
+}
+
+// resourceRTFDeploymentCreate posts a new deployment to the Runtime Fabrics
+// target, records the returned id, and delegates to the read function to
+// populate the rest of the state.
+func resourceRTFDeploymentCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	var diags diag.Diagnostics
+	pco := m.(ProviderConfOutput)
+	name := d.Get("name").(string)
+	orgid := d.Get("org_id").(string)
+	envid := d.Get("env_id").(string)
+	authctx := getAppDeploymentV2AuthCtx(ctx, &pco)
+	body := newRTFDeploymentBody(d)
+	//Execute post deployment
+	res, httpr, err := pco.appmanagerclient.DefaultApi.PostDeployment(authctx, orgid, envid).DeploymentRequestBody(*body).Execute()
+	if err != nil {
+		var details string
+		// Prefer the API's error payload over the client error when available.
+		if httpr != nil && httpr.StatusCode >= 400 {
+			defer httpr.Body.Close()
+			b, _ := io.ReadAll(httpr.Body)
+			details = string(b)
+		} else {
+			details = err.Error()
+		}
+		// Assign (not ":=") so we append to the function-level diags rather
+		// than a shadowed copy.
+		diags = append(diags, diag.Diagnostic{
+			Severity: diag.Error,
+			Summary:  "Unable to create " + name + " deployment for runtime fabrics.",
+			Detail:   details,
+		})
+		return diags
+	}
+	defer httpr.Body.Close()
+	d.SetId(res.GetId())
+	return resourceRTFDeploymentRead(ctx, d, m)
+}
+
+// resourceRTFDeploymentRead fetches the deployment and copies its attributes
+// into the resource data. It also supports import via a composed id of the
+// form {org_id}/{env_id}/{deployment_id}.
+func resourceRTFDeploymentRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	var diags diag.Diagnostics
+	pco := m.(ProviderConfOutput)
+	id := d.Id()
+	orgid := d.Get("org_id").(string)
+	envid := d.Get("env_id").(string)
+	// On import, org/env/deployment ids arrive packed in the resource id.
+	if isComposedResourceId(id) {
+		orgid, envid, id = decomposeRTFDeploymentId(d)
+	}
+	authctx := getAppDeploymentV2AuthCtx(ctx, &pco)
+	//perform request
+	res, httpr, err := pco.appmanagerclient.DefaultApi.GetDeploymentById(authctx, orgid, envid, id).Execute()
+	if err != nil {
+		var details string
+		if httpr != nil && httpr.StatusCode >= 400 {
+			defer httpr.Body.Close()
+			b, _ := io.ReadAll(httpr.Body)
+			details = string(b)
+		} else {
+			details = err.Error()
+		}
+		// Assign (not ":=") so we append to the function-level diags rather
+		// than a shadowed copy.
+		diags = append(diags, diag.Diagnostic{
+			Severity: diag.Error,
+			Summary:  "Unable to read runtime fabrics deployment " + id + ".",
+			Detail:   details,
+		})
+		return diags
+	}
+	defer httpr.Body.Close()
+
+	//process data
+	data := flattenAppDeploymentV2(res)
+	if err := setAppDeploymentV2AttributesToResourceData(d, data); err != nil {
+		diags = append(diags, diag.Diagnostic{
+			Severity: diag.Error,
+			Summary:  "Unable to set App Deployment details attributes",
+			Detail:   err.Error(),
+		})
+		return diags
+	}
+	// setting all params required for reading in case of import
+	d.SetId(res.GetId())
+	d.Set("org_id", orgid)
+	d.Set("env_id", envid)
+
+	return diags
+}
+
+// resourceRTFDeploymentUpdate patches the deployment when any updatable
+// attribute ("application" or "target") changed, then re-reads the state.
+// It is a no-op when nothing relevant changed.
+func resourceRTFDeploymentUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	var diags diag.Diagnostics
+	if !d.HasChanges(getRTFDeploymentUpdatableAttributes()...) {
+		return diags
+	}
+	pco := m.(ProviderConfOutput)
+	id := d.Id()
+	orgid := d.Get("org_id").(string)
+	envid := d.Get("env_id").(string)
+	name := d.Get("name").(string)
+	authctx := getAppDeploymentV2AuthCtx(ctx, &pco)
+	body := newRTFDeploymentBody(d)
+	_, httpr, err := pco.appmanagerclient.DefaultApi.PatchDeployment(authctx, orgid, envid, id).DeploymentRequestBody(*body).Execute()
+	if err != nil {
+		var details string
+		if httpr != nil && httpr.StatusCode >= 400 {
+			defer httpr.Body.Close()
+			b, _ := io.ReadAll(httpr.Body)
+			details = string(b)
+		} else {
+			details = err.Error()
+		}
+		// Assign (not ":=") so we append to the function-level diags rather
+		// than a shadowed copy.
+		diags = append(diags, diag.Diagnostic{
+			Severity: diag.Error,
+			Summary:  "Unable to update deployment " + name + " on runtime fabrics.",
+			Detail:   details,
+		})
+		return diags
+	}
+	defer httpr.Body.Close()
+	return resourceRTFDeploymentRead(ctx, d, m)
+}
+
+// resourceRTFDeploymentDelete removes the deployment from the Runtime Fabrics
+// target and clears the resource id.
+func resourceRTFDeploymentDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	var diags diag.Diagnostics
+	pco := m.(ProviderConfOutput)
+	id := d.Id()
+	orgid := d.Get("org_id").(string)
+	envid := d.Get("env_id").(string)
+	name := d.Get("name").(string)
+	authctx := getAppDeploymentV2AuthCtx(ctx, &pco)
+	httpr, err := pco.appmanagerclient.DefaultApi.DeleteDeployment(authctx, orgid, envid, id).Execute()
+	if err != nil {
+		var details string
+		if httpr != nil && httpr.StatusCode >= 400 {
+			defer httpr.Body.Close()
+			b, _ := io.ReadAll(httpr.Body)
+			details = string(b)
+		} else {
+			details = err.Error()
+		}
+		// Assign (not ":=") so we append to the function-level diags rather
+		// than a shadowed copy. Message fixed: this resource targets runtime
+		// fabrics, not cloudhub 2.0 shared-space.
+		diags = append(diags, diag.Diagnostic{
+			Severity: diag.Error,
+			Summary:  "Unable to delete deployment " + name + " on runtime fabrics.",
+			Detail:   details,
+		})
+		return diags
+	}
+	defer httpr.Body.Close()
+	// d.SetId("") is automatically called assuming delete returns no errors, but
+	// it is added here for explicitness.
+	d.SetId("")
+	return diags
+}
+
+// Prepares Deployment Post Body out of resource data input
+func newRTFDeploymentBody(d *schema.ResourceData) *application_manager_v2.DeploymentRequestBody {
+ body := application_manager_v2.NewDeploymentRequestBody()
+ // -- Parsing Application
+ app_list_d := d.Get("application").([]interface{})
+ app_d := app_list_d[0].(map[string]interface{})
+ application := newRTFDeploymentApplication(app_d)
+ // -- Parsing Target
+ target_list_d := d.Get("target").([]interface{})
+ target_d := target_list_d[0].(map[string]interface{})
+ target := newRTFDeploymentTarget(target_d)
+ //Set Body Data
+ body.SetName(d.Get("name").(string))
+ body.SetApplication(*application)
+ body.SetTarget(*target)
+
+ return body
+}
+
+// Prepares Application object out of map input
+func newRTFDeploymentApplication(app_d map[string]interface{}) *application_manager_v2.Application {
+ ref_list_d := app_d["ref"].([]interface{})
+ ref_d := ref_list_d[0].(map[string]interface{})
+ // Ref
+ ref := newRTFDeploymentRef(ref_d)
+ //Parse Configuration
+ configuration_list_d := app_d["configuration"].([]interface{})
+ configuration_d := configuration_list_d[0].(map[string]interface{})
+ configuration := newRTFDeploymentConfiguration(configuration_d)
+ //Object Store V2
+ // object_store_v2_enabled_d := app_d["object_store_v2_enabled"].(bool)
+ //Application Integration
+ // integrations := application_manager_v2.NewApplicationIntegrations()
+ // object_store_v2 := application_manager_v2.NewObjectStoreV2()
+ // object_store_v2.SetEnabled(object_store_v2_enabled_d)
+ // services := application_manager_v2.NewServices()
+ // services.SetObjectStoreV2(*object_store_v2)
+ // integrations.SetServices(*services)
+ //Application
+ application := application_manager_v2.NewApplication()
+ application.SetDesiredState(app_d["desired_state"].(string))
+ application.SetConfiguration(*configuration)
+ // application.SetIntegrations(*integrations)
+ application.SetRef(*ref)
+
+ return application
+}
+
+// Prepares Target object out of map input
+func newRTFDeploymentTarget(target_d map[string]interface{}) *application_manager_v2.Target {
+ deployment_settings_list_d := target_d["deployment_settings"].([]interface{})
+ deployment_settings_d := deployment_settings_list_d[0].(map[string]interface{})
+ deployment_settings := newRTFDeploymentDeploymentSettings(deployment_settings_d)
+ //Prepare Target data
+ target := application_manager_v2.NewTarget()
+ target.SetProvider(target_d["provider"].(string))
+ target.SetTargetId(target_d["target_id"].(string))
+ target.SetDeploymentSettings(*deployment_settings)
+ target.SetReplicas(int32(target_d["replicas"].(int)))
+
+ return target
+}
+
+// Prepares Ref Object out of map input
+func newRTFDeploymentRef(ref_d map[string]interface{}) *application_manager_v2.Ref {
+ ref := application_manager_v2.NewRef()
+ ref.SetGroupId(ref_d["group_id"].(string))
+ ref.SetArtifactId(ref_d["artifact_id"].(string))
+ ref.SetVersion(ref_d["version"].(string))
+ ref.SetPackaging(ref_d["packaging"].(string))
+ return ref
+}
+
+// Prepares Application Configuration Object out of map input
+func newRTFDeploymentConfiguration(configuration_d map[string]interface{}) *application_manager_v2.AppConfiguration {
+ //Mule Agent App Properties Service
+ mule_agent_app_props_service_list_d := configuration_d["mule_agent_app_props_service"].([]interface{})
+ mule_agent_app_props_service_d := mule_agent_app_props_service_list_d[0].(map[string]interface{})
+ mule_agent_app_props_service_properties := mule_agent_app_props_service_d["properties"].(map[string]interface{})
+ mule_agent_app_props_service_secure_properties := mule_agent_app_props_service_d["secure_properties"].(map[string]interface{})
+ mule_agent_app_props_service := application_manager_v2.NewMuleAgentAppPropService()
+ mule_agent_app_props_service.SetProperties(mule_agent_app_props_service_properties)
+ mule_agent_app_props_service.SetSecureProperties(mule_agent_app_props_service_secure_properties)
+ mule_agent_logging_service_list_d := configuration_d["mule_agent_logging_service"].([]interface{})
+ mule_agent_logging_service_d := mule_agent_logging_service_list_d[0].(map[string]interface{})
+ //Scope logging configuration
+ scope_logging_configurations_list_d := mule_agent_logging_service_d["scope_logging_configurations"].([]interface{})
+ scope_logging_configurations := make([]application_manager_v2.ScopeLoggingConfiguration, len(scope_logging_configurations_list_d))
+ for i, item := range scope_logging_configurations_list_d {
+ data := item.(map[string]interface{})
+ conf := application_manager_v2.NewScopeLoggingConfiguration()
+ conf.SetScope(data["scope"].(string))
+ conf.SetLogLevel(data["log_level"].(string))
+ scope_logging_configurations[i] = *conf
+ }
+ //Mule Agent Logging Service
+ mule_agent_logging_service := application_manager_v2.NewMuleAgentLoggingService()
+ mule_agent_logging_service.SetScopeLoggingConfigurations(scope_logging_configurations)
+ configuration := application_manager_v2.NewAppConfiguration()
+ configuration.SetMuleAgentApplicationPropertiesService(*mule_agent_app_props_service)
+ configuration.SetMuleAgentLoggingService(*mule_agent_logging_service)
+
+ return configuration
+}
+
+// Prepares DeploymentSettings object out of map input
+func newRTFDeploymentDeploymentSettings(deployment_settings_d map[string]interface{}) *application_manager_v2.DeploymentSettings {
+ //http
+ http := newRTFDeploymentHttp(deployment_settings_d)
+ //runtime
+ runtime := newRTFDeploymentRuntime(deployment_settings_d)
+ //autoscaling
+ autoscaling := newRTFDeploymentAutoscaling(deployment_settings_d)
+ //resources
+ resources := newRTFDeploymentResources(deployment_settings_d)
+ //Prepare JVM Args data
+ jvm := application_manager_v2.NewJvm()
+ jvm.SetArgs(deployment_settings_d["jvm_args"].(string))
+ deployment_settings := application_manager_v2.NewDeploymentSettings()
+ deployment_settings.SetClustered(deployment_settings_d["clustered"].(bool))
+ deployment_settings.SetEnforceDeployingReplicasAcrossNodes(deployment_settings_d["enforce_deploying_replicas_across_nodes"].(bool))
+ deployment_settings.SetHttp(*http)
+ deployment_settings.SetJvm(*jvm)
+ deployment_settings.SetUpdateStrategy(deployment_settings_d["update_strategy"].(string))
+ deployment_settings.SetDisableAmLogForwarding(deployment_settings_d["disable_am_log_forwarding"].(bool))
+ deployment_settings.SetPersistentObjectStore(deployment_settings_d["persistent_object_store"].(bool))
+ deployment_settings.SetDisableExternalLogForwarding(deployment_settings_d["disable_external_log_forwarding"].(bool))
+ deployment_settings.SetGenerateDefaultPublicUrl(deployment_settings_d["generate_default_public_url"].(bool))
+ deployment_settings.SetRuntime(*runtime)
+ deployment_settings.SetAutoscaling(*autoscaling)
+ deployment_settings.SetResources(*resources)
+
+ return deployment_settings
+}
+
+// Prepares Runtime object out of map input
+func newRTFDeploymentRuntime(deployment_settings_d map[string]interface{}) *application_manager_v2.Runtime {
+ runtime := application_manager_v2.NewRuntime()
+ if val, ok := deployment_settings_d["runtime"]; ok {
+ runtime_list_d := val.([]interface{})
+ if len(runtime_list_d) > 0 {
+ runtime_d := runtime_list_d[0].(map[string]interface{})
+ runtime.SetVersion(runtime_d["version"].(string))
+ runtime.SetReleaseChannel(runtime_d["release_channel"].(string))
+ runtime.SetJava(runtime_d["java"].(string))
+ }
+
+ }
+ return runtime
+}
+
+// Prepares Http object out of map input
+func newRTFDeploymentHttp(deployment_settings_d map[string]interface{}) *application_manager_v2.Http {
+ http_inbound := application_manager_v2.NewHttpInbound()
+ http := application_manager_v2.NewHttp()
+ if val, ok := deployment_settings_d["http"]; ok {
+ http_list_d := val.([]interface{})
+ if len(http_list_d) > 0 {
+ http_d := http_list_d[0].(map[string]interface{})
+ http_inbound.SetLastMileSecurity(http_d["inbound_last_mile_security"].(bool))
+ http_inbound.SetForwardSslSession(http_d["inbound_forward_ssl_session"].(bool))
+ http.SetInbound(*http_inbound)
+ }
+ }
+ return http
+}
+
+// newRTFDeploymentAutoscaling maps the optional "autoscaling" sub-block to an
+// API Autoscaling object; when absent an empty Autoscaling is returned.
+func newRTFDeploymentAutoscaling(deployment_settings_d map[string]interface{}) *application_manager_v2.Autoscaling {
+	autoscaling := application_manager_v2.NewAutoscaling()
+	val, ok := deployment_settings_d["autoscaling"]
+	if !ok {
+		return autoscaling
+	}
+	if list := val.([]interface{}); len(list) > 0 {
+		data := list[0].(map[string]interface{})
+		autoscaling.SetEnabled(data["enabled"].(bool))
+		autoscaling.SetMinReplicas(int32(data["min_replicas"].(int)))
+		autoscaling.SetMaxReplicas(int32(data["max_replicas"].(int)))
+	}
+	return autoscaling
+}
+
+// newRTFDeploymentResources maps the "resources" sub-block to an API
+// Resources object (cpu and memory limits/reservations); when absent an empty
+// Resources is returned.
+func newRTFDeploymentResources(deployment_settings_d map[string]interface{}) *application_manager_v2.Resources {
+	resources := application_manager_v2.NewResources()
+	val, ok := deployment_settings_d["resources"]
+	if !ok {
+		return resources
+	}
+	if list := val.([]interface{}); len(list) > 0 {
+		data := list[0].(map[string]interface{})
+		cpu := application_manager_v2.NewResourcesCpu()
+		cpu.SetLimit(data["cpu_limit"].(string))
+		cpu.SetReserved(data["cpu_reserved"].(string))
+		resources.SetCpu(*cpu)
+		memory := application_manager_v2.NewResourcesMemory()
+		memory.SetLimit(data["memory_limit"].(string))
+		memory.SetReserved(data["memory_reserved"].(string))
+		resources.SetMemory(*memory)
+	}
+	return resources
+}
+
+// decomposeRTFDeploymentId splits a composed import id into its org id,
+// environment id and deployment id components.
+func decomposeRTFDeploymentId(d *schema.ResourceData) (string, string, string) {
+	parts := DecomposeResourceId(d.Id())
+	return parts[0], parts[1], parts[2]
+}
+
+// getRTFDeploymentUpdatableAttributes lists the resource attributes whose
+// change triggers a PATCH of the deployment.
+func getRTFDeploymentUpdatableAttributes() []string {
+	return []string{"application", "target"}
+}
diff --git a/anypoint/resource_team_member.go b/anypoint/resource_team_member.go
index 7086dac..d275c4b 100644
--- a/anypoint/resource_team_member.go
+++ b/anypoint/resource_team_member.go
@@ -162,7 +162,7 @@ func resourceTeamMemberRead(ctx context.Context, d *schema.ResourceData, m inter
diags = append(diags, diag.Diagnostic{
Severity: diag.Error,
Summary: "Unable to find team member " + userid + " for team " + teamid,
- Detail: err.Error(),
+ Detail: "Team Member Not Found",
})
return diags
}
diff --git a/anypoint/util.go b/anypoint/util.go
index 6d61eef..9954254 100644
--- a/anypoint/util.go
+++ b/anypoint/util.go
@@ -4,6 +4,7 @@ import (
"crypto/sha1"
"encoding/hex"
"fmt"
+ "math"
"reflect"
"sort"
"strconv"
@@ -82,6 +83,30 @@ func StringInSlice(expected []string, v string, ignoreCase bool) bool {
return false
}
+// FloatInSlice reports whether v is present in expected (exact float64
+// equality; no tolerance is applied).
+func FloatInSlice(expected []float64, v float64) bool {
+	for i := range expected {
+		if expected[i] == v {
+			return true
+		}
+	}
+	return false
+}
+
+// rounds a float32 value to the specified number of decimal places.
+func RoundFloat32(val float32, precision int) float32 {
+ // Convert float32 to float64 for precision in operations
+ return float32(RoundFloat64(float64(val), precision))
+}
+
+// rounds a float64 value to the specified number of decimal places.
+func RoundFloat64(val float64, precision int) float64 {
+ // Convert float32 to float64 for precision in operations
+ p := math.Pow10(precision)
+ rounded := math.Round(val*p) / p
+ // Convert back to float32
+ return rounded
+}
+
// Uses sha1 to calculate digest of the given source string
func CalcSha1Digest(source string) string {
hasher := sha1.New()
diff --git a/docs/data-sources/app_deployment_v2.md b/docs/data-sources/app_deployment_v2.md
new file mode 100644
index 0000000..5d0e70f
--- /dev/null
+++ b/docs/data-sources/app_deployment_v2.md
@@ -0,0 +1,235 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "anypoint_app_deployment_v2 Data Source - terraform-provider-anypoint"
+subcategory: ""
+description: |-
+ Reads a specific `Deployment`.
+ This only works for Cloudhub V2 and Runtime Fabrics Apps.
+---
+
+# anypoint_app_deployment_v2 (Data Source)
+
+Reads a specific `Deployment`.
+ This only works for Cloudhub V2 and Runtime Fabrics Apps.
+
+## Example Usage
+
+```terraform
+data "anypoint_app_deployment_v2" "app" {
+ id = "de32fc9d-6b25-4d6f-bd5e-cac32272b2f7"
+ org_id = var.root_org
+ env_id = var.env_id
+}
+```
+
+
+## Schema
+
+### Required
+
+- `env_id` (String) The environment where mule app is deployed.
+- `id` (String) The unique id of the mule app deployment in the platform.
+- `org_id` (String) The organization where the mule app is deployed.
+
+### Read-Only
+
+- `application` (List of Object) The details of the application to deploy (see [below for nested schema](#nestedatt--application))
+- `creation_date` (Number) The creation date of the mule app.
+- `desired_version` (String) The deployment desired version of the mule app.
+- `last_modified_date` (Number) The last modification date of the mule app.
+- `last_successful_version` (String) The last successfully deployed version
+- `name` (String) The name of the deployed mule app.
+- `replicas` (List of Object) Data of the mule app replicas (see [below for nested schema](#nestedatt--replicas))
+- `status` (String) The status of the mule app deployment.
+- `target` (List of Object) The details of the target to perform the deployment on. (see [below for nested schema](#nestedatt--target))
+
+
+### Nested Schema for `application`
+
+Read-Only:
+
+- `configuration` (List of Object) (see [below for nested schema](#nestedobjatt--application--configuration))
+- `desired_state` (String)
+- `object_store_v2_enabled` (Boolean)
+- `ref` (List of Object) (see [below for nested schema](#nestedobjatt--application--ref))
+- `status` (String)
+- `vcores` (Number)
+
+
+### Nested Schema for `application.configuration`
+
+Read-Only:
+
+- `mule_agent_app_props_service` (List of Object) (see [below for nested schema](#nestedobjatt--application--configuration--mule_agent_app_props_service))
+- `mule_agent_logging_service` (List of Object) (see [below for nested schema](#nestedobjatt--application--configuration--mule_agent_logging_service))
+- `mule_agent_scheduling_service` (List of Object) (see [below for nested schema](#nestedobjatt--application--configuration--mule_agent_scheduling_service))
+
+
+### Nested Schema for `application.configuration.mule_agent_app_props_service`
+
+Read-Only:
+
+- `application_name` (String)
+- `properties` (Map of String)
+- `secure_properties` (Map of String)
+
+
+
+### Nested Schema for `application.configuration.mule_agent_logging_service`
+
+Read-Only:
+
+- `artifact_name` (String)
+- `scope_logging_configurations` (List of Object) (see [below for nested schema](#nestedobjatt--application--configuration--mule_agent_logging_service--scope_logging_configurations))
+
+
+### Nested Schema for `application.configuration.mule_agent_logging_service.scope_logging_configurations`
+
+Read-Only:
+
+- `log_level` (String)
+- `scope` (String)
+
+
+
+
+### Nested Schema for `application.configuration.mule_agent_scheduling_service`
+
+Read-Only:
+
+- `application_name` (String)
+- `schedulers` (List of Object) (see [below for nested schema](#nestedobjatt--application--configuration--mule_agent_scheduling_service--schedulers))
+
+
+### Nested Schema for `application.configuration.mule_agent_scheduling_service.schedulers`
+
+Read-Only:
+
+- `enabled` (Boolean)
+- `expression` (String)
+- `flow_name` (String)
+- `frequency` (String)
+- `name` (String)
+- `start_delay` (String)
+- `time_unit` (String)
+- `time_zone` (String)
+- `type` (String)
+
+
+
+
+
+### Nested Schema for `application.ref`
+
+Read-Only:
+
+- `artifact_id` (String)
+- `group_id` (String)
+- `packaging` (String)
+- `version` (String)
+
+
+
+
+### Nested Schema for `replicas`
+
+Read-Only:
+
+- `current_deployment_version` (String)
+- `deployment_location` (String)
+- `id` (String)
+- `reason` (String)
+- `state` (String)
+
+
+
+### Nested Schema for `target`
+
+Read-Only:
+
+- `deployment_settings` (List of Object) (see [below for nested schema](#nestedobjatt--target--deployment_settings))
+- `provider` (String)
+- `replicas` (Number)
+- `target_id` (String)
+
+
+### Nested Schema for `target.deployment_settings`
+
+Read-Only:
+
+- `anypoint_monitoring_scope` (String)
+- `autoscaling` (List of Object) (see [below for nested schema](#nestedobjatt--target--deployment_settings--autoscaling))
+- `clustered` (Boolean)
+- `disable_am_log_forwarding` (Boolean)
+- `disable_external_log_forwarding` (Boolean)
+- `enforce_deploying_replicas_across_nodes` (Boolean)
+- `forward_ssl_session` (Boolean)
+- `generate_default_public_url` (Boolean)
+- `http` (List of Object) (see [below for nested schema](#nestedobjatt--target--deployment_settings--http))
+- `jvm_args` (String)
+- `last_mile_security` (Boolean)
+- `persistent_object_store` (Boolean)
+- `resources` (List of Object) (see [below for nested schema](#nestedobjatt--target--deployment_settings--resources))
+- `runtime` (List of Object) (see [below for nested schema](#nestedobjatt--target--deployment_settings--runtime))
+- `sidecars` (List of Object) (see [below for nested schema](#nestedobjatt--target--deployment_settings--sidecars))
+- `tracing_enabled` (Boolean)
+- `update_strategy` (String)
+
+
+### Nested Schema for `target.deployment_settings.autoscaling`
+
+Read-Only:
+
+- `enabled` (Boolean)
+- `max_replicas` (Number)
+- `min_replicas` (Number)
+
+
+
+### Nested Schema for `target.deployment_settings.http`
+
+Read-Only:
+
+- `inbound_forward_ssl_session` (Boolean)
+- `inbound_internal_url` (String)
+- `inbound_last_mile_security` (Boolean)
+- `inbound_path_rewrite` (String)
+- `inbound_public_url` (String)
+- `inbound_unique_id` (String)
+
+
+
+### Nested Schema for `target.deployment_settings.resources`
+
+Read-Only:
+
+- `cpu_limit` (String)
+- `cpu_reserved` (String)
+- `memory_limit` (String)
+- `memory_reserved` (String)
+- `storage_limit` (String)
+- `storage_reserved` (String)
+
+
+
+### Nested Schema for `target.deployment_settings.runtime`
+
+Read-Only:
+
+- `java` (String)
+- `release_channel` (String)
+- `version` (String)
+
+
+
+### Nested Schema for `target.deployment_settings.sidecars`
+
+Read-Only:
+
+- `anypoint_monitoring_image` (String)
+- `anypoint_monitoring_resources_cpu_limit` (String)
+- `anypoint_monitoring_resources_cpu_reserved` (String)
+- `anypoint_monitoring_resources_memory_limit` (String)
+- `anypoint_monitoring_resources_memory_reserved` (String)
+
+
diff --git a/docs/data-sources/app_deployments_v2.md b/docs/data-sources/app_deployments_v2.md
new file mode 100644
index 0000000..3046cd2
--- /dev/null
+++ b/docs/data-sources/app_deployments_v2.md
@@ -0,0 +1,68 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "anypoint_app_deployments_v2 Data Source - terraform-provider-anypoint"
+subcategory: ""
+description: |-
+ Reads `Deployments` from the runtime manager for a given organization and environment.
+ This only works for Cloudhub V2 and Runtime Fabrics Apps.
+---
+
+# anypoint_app_deployments_v2 (Data Source)
+
+Reads `Deployments` from the runtime manager for a given organization and environment.
+ This only works for Cloudhub V2 and Runtime Fabrics Apps.
+
+## Example Usage
+
+```terraform
+data "anypoint_app_deployments_v2" "apps" {
+ org_id = var.root_org
+ env_id = var.env_id
+}
+```
+
+
+## Schema
+
+### Required
+
+- `env_id` (String) The environment id where to get deployments from
+- `org_id` (String) The organization where to query deployments.
+
+### Optional
+
+- `params` (Block Set, Max: 1) The search parameters. Should only provide one occurrence of the block. (see [below for nested schema](#nestedblock--params))
+
+### Read-Only
+
+- `deployments` (List of Object) The result of the query with the list of all deployments. (see [below for nested schema](#nestedatt--deployments))
+- `id` (String) The ID of this resource.
+- `total` (Number) The total number of available results
+
+
+### Nested Schema for `params`
+
+Optional:
+
+- `limit` (Number) Limit the number of elements in the response.
+- `offset` (Number) Skip over a number of elements by specifying an offset value for the query.
+- `target_id` (String) The id of the target the deployments are deployed to.
+
+
+
+### Nested Schema for `deployments`
+
+Read-Only:
+
+- `application_status` (String)
+- `creation_date` (Number)
+- `current_runtime_version` (String)
+- `id` (String)
+- `last_modified_date` (Number)
+- `last_successful_runtime_version` (String)
+- `name` (String)
+- `status` (String)
+- `target_id` (String)
+- `target_provider` (String)
+
+
diff --git a/docs/data-sources/fabrics.md b/docs/data-sources/fabrics.md
new file mode 100644
index 0000000..e9ebf3b
--- /dev/null
+++ b/docs/data-sources/fabrics.md
@@ -0,0 +1,141 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "anypoint_fabrics Data Source - terraform-provider-anypoint"
+subcategory: ""
+description: |-
+ Reads a specific `Runtime Fabrics'` instance.
+---
+
+# anypoint_fabrics (Data Source)
+
+Reads a specific `Runtime Fabrics'` instance.
+
+## Example Usage
+
+```terraform
+data "anypoint_fabrics" "rtf" {
+ id = "YOUR_FABRICS_ID"
+ org_id = var.org_id
+}
+```
+
+
+## Schema
+
+### Required
+
+- `id` (String) The unique id of the fabrics instance in the platform.
+- `org_id` (String) The organization id where the fabrics is hosted.
+
+### Read-Only
+
+- `activation_data` (String) The activation data to use during installation of fabrics on the kubernetes cluster. Only available when instance is created and not activated yet.
+- `app_scoped_log_forwarding` (Boolean) Whether app scoped log forwarding is active.
+- `available_upgrade_version` (String) The available upgrade version of fabrics.
+- `cluster_configuration_level` (String) The configuration level of the cluster (production or development).
+- `created_at` (Number) The creation date of the fabrics instance.
+- `desired_version` (String) The desired version of fabrics.
+- `features` (List of Object) The features of this cluster. (see [below for nested schema](#nestedatt--features))
+- `ingress` (List of Object) The ingress configurations of this cluster. (see [below for nested schema](#nestedatt--ingress))
+- `is_helm_managed` (Boolean) Whether this cluster is managed by helm.
+- `is_managed` (Boolean) Whether this cluster is managed.
+- `kubernetes_version` (String) The kubernetes version of the cluster.
+- `license_expiry_date` (Number) The expiry date of the license (timestamp).
+- `name` (String) The name of this fabrics instance.
+- `namespace` (String) The namespace where runtime fabrics is installed.
+- `nodes` (List of Object) The list of fabrics nodes. (see [below for nested schema](#nestedatt--nodes))
+- `region` (String) The region where fabrics instance is hosted.
+- `seconds_since_heartbeat` (Number) The number of seconds since last heartbeat.
+- `status` (String) The status of the fabrics instance.
+- `upgrade` (List of Object) The status of the fabrics. Only available when instance is created and not activated yet. (see [below for nested schema](#nestedatt--upgrade))
+- `vendor` (String) The vendor name of the kubernetes instance hosting fabrics.
+- `vendor_metadata` (Map of String) The vendor metadata
+- `version` (String) The version of fabrics.
+
+
+### Nested Schema for `features`
+
+Read-Only:
+
+- `enhanced_security` (Boolean)
+- `persistent_store` (Boolean)
+
+
+
+### Nested Schema for `ingress`
+
+Read-Only:
+
+- `domains` (List of String)
+
+
+
+### Nested Schema for `nodes`
+
+Read-Only:
+
+- `allocated_limit_capacity` (List of Object) (see [below for nested schema](#nestedobjatt--nodes--allocated_limit_capacity))
+- `allocated_request_capacity` (List of Object) (see [below for nested schema](#nestedobjatt--nodes--allocated_request_capacity))
+- `capacity` (List of Object) (see [below for nested schema](#nestedobjatt--nodes--capacity))
+- `docker_version` (String)
+- `kubelet_version` (String)
+- `name` (String)
+- `role` (String)
+- `status` (List of Object) (see [below for nested schema](#nestedobjatt--nodes--status))
+- `uid` (String)
+
+
+### Nested Schema for `nodes.allocated_limit_capacity`
+
+Read-Only:
+
+- `cpu` (Number)
+- `cpu_millis` (Number)
+- `memory` (String)
+- `memory_mi` (Number)
+- `pods` (Number)
+
+
+
+### Nested Schema for `nodes.allocated_request_capacity`
+
+Read-Only:
+
+- `cpu` (Number)
+- `cpu_millis` (Number)
+- `memory` (String)
+- `memory_mi` (Number)
+- `pods` (Number)
+
+
+
+### Nested Schema for `nodes.capacity`
+
+Read-Only:
+
+- `cpu` (Number)
+- `cpu_millis` (Number)
+- `memory` (String)
+- `memory_mi` (Number)
+- `pods` (Number)
+
+
+
+### Nested Schema for `nodes.status`
+
+Read-Only:
+
+- `is_healthy` (Boolean)
+- `is_ready` (Boolean)
+- `is_schedulable` (Boolean)
+
+
+
+
+### Nested Schema for `upgrade`
+
+Read-Only:
+
+- `status` (String)
+
+
diff --git a/docs/data-sources/fabrics_associations.md b/docs/data-sources/fabrics_associations.md
new file mode 100644
index 0000000..ec7695e
--- /dev/null
+++ b/docs/data-sources/fabrics_associations.md
@@ -0,0 +1,49 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "anypoint_fabrics_associations Data Source - terraform-provider-anypoint"
+subcategory: ""
+description: |-
+  Reads all `Runtime Fabrics'` environment associations available in your org.
+---
+
+# anypoint_fabrics_associations (Data Source)
+
+Reads all `Runtime Fabrics'` environment associations available in your org.
+
+## Example Usage
+
+```terraform
+data "anypoint_fabrics_associations" "assoc" {
+ fabrics_id = "YOUR_FABRICS_ID"
+ org_id = var.root_org
+}
+
+output "associations" {
+ value = data.anypoint_fabrics_associations.assoc.associations
+}
+```
+
+
+## Schema
+
+### Required
+
+- `fabrics_id` (String) The runtime fabrics id
+- `org_id` (String) The business group id
+
+### Read-Only
+
+- `associations` (List of Object) (see [below for nested schema](#nestedatt--associations))
+- `id` (String) The ID of this resource.
+- `total` (Number) The total number of available results
+
+
+### Nested Schema for `associations`
+
+Read-Only:
+
+- `env_id` (String)
+- `id` (String)
+- `org_id` (String)
+
+
diff --git a/docs/data-sources/fabrics_health.md b/docs/data-sources/fabrics_health.md
new file mode 100644
index 0000000..9ff66aa
--- /dev/null
+++ b/docs/data-sources/fabrics_health.md
@@ -0,0 +1,208 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "anypoint_fabrics_health Data Source - terraform-provider-anypoint"
+subcategory: ""
+description: |-
+ Reads `Runtime Fabrics'` health and monitoring metrics.
+---
+
+# anypoint_fabrics_health (Data Source)
+
+Reads `Runtime Fabrics'` health and monitoring metrics.
+
+## Example Usage
+
+```terraform
+data "anypoint_fabrics_health" "health" {
+ fabrics_id = "YOUR_FABRICS_ID"
+ org_id = var.org_id
+}
+```
+
+
+## Schema
+
+### Required
+
+- `fabrics_id` (String) The runtime fabrics id
+- `org_id` (String) The business group id
+
+### Read-Only
+
+- `anypoint_monitoring` (List of Object) The ability to see metrics and logs in Anypoint Monitoring. (see [below for nested schema](#nestedatt--anypoint_monitoring))
+- `appliance` (List of Object) Detailed status of the appliance, when applicable. (see [below for nested schema](#nestedatt--appliance))
+- `cluster_monitoring` (List of Object) The ability to monitor and report the status of the Runtime Fabric cluster. (see [below for nested schema](#nestedatt--cluster_monitoring))
+- `external_log_forwarding` (List of Object) The ability to forward application logs to an external provider. (see [below for nested schema](#nestedatt--external_log_forwarding))
+- `id` (String) The ID of this resource.
+- `infrastructure` (List of Object) Detailed status of the infrastructure supporting the Runtime Fabric cluster. (see [below for nested schema](#nestedatt--infrastructure))
+- `load_balancing` (List of Object) The ability to accept inbound requests and load-balance across different replicas of application instances. (see [below for nested schema](#nestedatt--load_balancing))
+- `manage_deployments` (List of Object) The ability to create, update, or delete application deployments in this Runtime Fabric. (see [below for nested schema](#nestedatt--manage_deployments))
+- `persistent_gateway` (List of Object) Detailed status of the persistent gateway for Runtime Fabric cluster. (see [below for nested schema](#nestedatt--persistent_gateway))
+
+
+### Nested Schema for `anypoint_monitoring`
+
+Read-Only:
+
+- `failed_probes` (List of Object) (see [below for nested schema](#nestedobjatt--anypoint_monitoring--failed_probes))
+- `healthy` (Boolean)
+- `probes` (String)
+- `updated_at` (Number)
+
+
+### Nested Schema for `anypoint_monitoring.failed_probes`
+
+Read-Only:
+
+- `last_transition_at` (Number)
+- `name` (String)
+- `reason` (String)
+
+
+
+
+### Nested Schema for `appliance`
+
+Read-Only:
+
+- `failed_probes` (List of Object) (see [below for nested schema](#nestedobjatt--appliance--failed_probes))
+- `healthy` (Boolean)
+- `probes` (String)
+- `updated_at` (Number)
+
+
+### Nested Schema for `appliance.failed_probes`
+
+Read-Only:
+
+- `last_transition_at` (Number)
+- `name` (String)
+- `reason` (String)
+
+
+
+
+### Nested Schema for `cluster_monitoring`
+
+Read-Only:
+
+- `failed_probes` (List of Object) (see [below for nested schema](#nestedobjatt--cluster_monitoring--failed_probes))
+- `healthy` (Boolean)
+- `probes` (String)
+- `updated_at` (Number)
+
+
+### Nested Schema for `cluster_monitoring.failed_probes`
+
+Read-Only:
+
+- `last_transition_at` (Number)
+- `name` (String)
+- `reason` (String)
+
+
+
+
+### Nested Schema for `external_log_forwarding`
+
+Read-Only:
+
+- `failed_probes` (List of Object) (see [below for nested schema](#nestedobjatt--external_log_forwarding--failed_probes))
+- `healthy` (Boolean)
+- `probes` (String)
+- `updated_at` (Number)
+
+
+### Nested Schema for `external_log_forwarding.failed_probes`
+
+Read-Only:
+
+- `last_transition_at` (Number)
+- `name` (String)
+- `reason` (String)
+
+
+
+
+### Nested Schema for `infrastructure`
+
+Read-Only:
+
+- `failed_probes` (List of Object) (see [below for nested schema](#nestedobjatt--infrastructure--failed_probes))
+- `healthy` (Boolean)
+- `probes` (String)
+- `updated_at` (Number)
+
+
+### Nested Schema for `infrastructure.failed_probes`
+
+Read-Only:
+
+- `last_transition_at` (Number)
+- `name` (String)
+- `reason` (String)
+
+
+
+
+### Nested Schema for `load_balancing`
+
+Read-Only:
+
+- `failed_probes` (List of Object) (see [below for nested schema](#nestedobjatt--load_balancing--failed_probes))
+- `healthy` (Boolean)
+- `probes` (String)
+- `updated_at` (Number)
+
+
+### Nested Schema for `load_balancing.failed_probes`
+
+Read-Only:
+
+- `last_transition_at` (Number)
+- `name` (String)
+- `reason` (String)
+
+
+
+
+### Nested Schema for `manage_deployments`
+
+Read-Only:
+
+- `failed_probes` (List of Object) (see [below for nested schema](#nestedobjatt--manage_deployments--failed_probes))
+- `healthy` (Boolean)
+- `probes` (String)
+- `updated_at` (Number)
+
+
+### Nested Schema for `manage_deployments.failed_probes`
+
+Read-Only:
+
+- `last_transition_at` (Number)
+- `name` (String)
+- `reason` (String)
+
+
+
+
+### Nested Schema for `persistent_gateway`
+
+Read-Only:
+
+- `failed_probes` (List of Object) (see [below for nested schema](#nestedobjatt--persistent_gateway--failed_probes))
+- `healthy` (Boolean)
+- `probes` (String)
+- `updated_at` (Number)
+
+
+### Nested Schema for `persistent_gateway.failed_probes`
+
+Read-Only:
+
+- `last_transition_at` (Number)
+- `name` (String)
+- `reason` (String)
+
+
diff --git a/docs/data-sources/fabrics_helm_repo.md b/docs/data-sources/fabrics_helm_repo.md
new file mode 100644
index 0000000..485cd73
--- /dev/null
+++ b/docs/data-sources/fabrics_helm_repo.md
@@ -0,0 +1,35 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "anypoint_fabrics_helm_repo Data Source - terraform-provider-anypoint"
+subcategory: ""
+description: |-
+ Reads `Runtime Fabrics'` Helm repository properties.
+---
+
+# anypoint_fabrics_helm_repo (Data Source)
+
+Reads `Runtime Fabrics'` Helm repository properties.
+
+## Example Usage
+
+```terraform
+data "anypoint_fabrics_helm_repo" "repo" {
+ org_id = var.org_id
+}
+```
+
+
+## Schema
+
+### Required
+
+- `org_id` (String) The business group id
+
+### Read-Only
+
+- `id` (String) The ID of this resource.
+- `rtf_image_registry_endpoint` (String) The runtime fabrics image registry endpoint
+- `rtf_image_registry_password` (String, Sensitive) The password used to authenticate to the image registry
+- `rtf_image_registry_user` (String) The user used to authenticate to the image registry
+
+
diff --git a/docs/data-sources/fabrics_list.md b/docs/data-sources/fabrics_list.md
new file mode 100644
index 0000000..84bcb30
--- /dev/null
+++ b/docs/data-sources/fabrics_list.md
@@ -0,0 +1,154 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "anypoint_fabrics_list Data Source - terraform-provider-anypoint"
+subcategory: ""
+description: |-
+ Reads all `Runtime Fabrics'` available in your org.
+---
+
+# anypoint_fabrics_list (Data Source)
+
+Reads all `Runtime Fabrics'` available in your org.
+
+## Example Usage
+
+```terraform
+data "anypoint_fabrics_list" "all" {
+ org_id = var.org_id
+}
+
+output "all" {
+ value = data.anypoint_fabrics_list.all.list
+}
+```
+
+
+## Schema
+
+### Required
+
+- `org_id` (String) The business group id
+
+### Read-Only
+
+- `id` (String) The ID of this resource.
+- `list` (List of Object) (see [below for nested schema](#nestedatt--list))
+- `total` (Number) The total number of available results
+
+
+### Nested Schema for `list`
+
+Read-Only:
+
+- `activation_data` (String)
+- `app_scoped_log_forwarding` (Boolean)
+- `available_upgrade_version` (String)
+- `cluster_configuration_level` (String)
+- `created_at` (Number)
+- `desired_version` (String)
+- `features` (List of Object) (see [below for nested schema](#nestedobjatt--list--features))
+- `id` (String)
+- `ingress` (List of Object) (see [below for nested schema](#nestedobjatt--list--ingress))
+- `is_helm_managed` (Boolean)
+- `is_managed` (Boolean)
+- `kubernetes_version` (String)
+- `license_expiry_date` (Number)
+- `name` (String)
+- `namespace` (String)
+- `nodes` (List of Object) (see [below for nested schema](#nestedobjatt--list--nodes))
+- `org_id` (String)
+- `region` (String)
+- `seconds_since_heartbeat` (Number)
+- `status` (String)
+- `upgrade` (List of Object) (see [below for nested schema](#nestedobjatt--list--upgrade))
+- `vendor` (String)
+- `vendor_metadata` (Map of String)
+- `version` (String)
+
+
+### Nested Schema for `list.features`
+
+Read-Only:
+
+- `enhanced_security` (Boolean)
+- `persistent_store` (Boolean)
+
+
+
+### Nested Schema for `list.ingress`
+
+Read-Only:
+
+- `domains` (List of String)
+
+
+
+### Nested Schema for `list.nodes`
+
+Read-Only:
+
+- `allocated_limit_capacity` (List of Object) (see [below for nested schema](#nestedobjatt--list--nodes--allocated_limit_capacity))
+- `allocated_request_capacity` (List of Object) (see [below for nested schema](#nestedobjatt--list--nodes--allocated_request_capacity))
+- `capacity` (List of Object) (see [below for nested schema](#nestedobjatt--list--nodes--capacity))
+- `docker_version` (String)
+- `kubelet_version` (String)
+- `name` (String)
+- `role` (String)
+- `status` (List of Object) (see [below for nested schema](#nestedobjatt--list--nodes--status))
+- `uid` (String)
+
+
+### Nested Schema for `list.nodes.allocated_limit_capacity`
+
+Read-Only:
+
+- `cpu` (Number)
+- `cpu_millis` (Number)
+- `memory` (String)
+- `memory_mi` (Number)
+- `pods` (Number)
+
+
+
+### Nested Schema for `list.nodes.allocated_request_capacity`
+
+Read-Only:
+
+- `cpu` (Number)
+- `cpu_millis` (Number)
+- `memory` (String)
+- `memory_mi` (Number)
+- `pods` (Number)
+
+
+
+### Nested Schema for `list.nodes.capacity`
+
+Read-Only:
+
+- `cpu` (Number)
+- `cpu_millis` (Number)
+- `memory` (String)
+- `memory_mi` (Number)
+- `pods` (Number)
+
+
+
+### Nested Schema for `list.nodes.status`
+
+Read-Only:
+
+- `is_healthy` (Boolean)
+- `is_ready` (Boolean)
+- `is_schedulable` (Boolean)
+
+
+
+
+### Nested Schema for `list.upgrade`
+
+Read-Only:
+
+- `status` (String)
+
+
diff --git a/docs/data-sources/roles.md b/docs/data-sources/roles.md
index 15efb47..04738c6 100644
--- a/docs/data-sources/roles.md
+++ b/docs/data-sources/roles.md
@@ -3,12 +3,12 @@
page_title: "anypoint_roles Data Source - terraform-provider-anypoint"
subcategory: ""
description: |-
- Reads all `roles` availabble.
+ Reads all `roles` available.
---
# anypoint_roles (Data Source)
-Reads all `roles` availabble.
+Reads all `roles` available.
## Example Usage
diff --git a/docs/data-sources/secretgroup.md b/docs/data-sources/secretgroup.md
index feffaf3..cbd5a88 100644
--- a/docs/data-sources/secretgroup.md
+++ b/docs/data-sources/secretgroup.md
@@ -10,7 +10,15 @@ description: |-
Query a specific secret-group in a given organization and environment.
-
+## Example Usage
+
+```terraform
+data "anypoint_secretgroup" "secretgroup" {
+ id = "your_secretgroup_id"
+ org_id = var.org_id
+ env_id = var.env_id
+}
+```
## Schema
diff --git a/docs/resources/apim_policy_jwt_validation.md b/docs/resources/apim_policy_jwt_validation.md
index 4a8dc4f..169345f 100644
--- a/docs/resources/apim_policy_jwt_validation.md
+++ b/docs/resources/apim_policy_jwt_validation.md
@@ -96,7 +96,9 @@ Required:
Optional:
+- `claims_to_headers` (List of String) List of strings with claims
- `client_id_expression` (String) Expression to obtain the Client ID from the request in order to validate it.
+- `custom_key_expression` (String) Data weave expression with custom key.
- `jwks_service_connection_timeout` (Number) Timeout specification, in milliseconds, when reaching the JWKS service. Default value is 10 seconds.
- `jwks_service_time_to_live` (Number) The amount of time, in minutes, that the JWKS will be considered valid. Once the JWKS has expired, it will have to be retrieved again.
Default value is 1 hour. Ignore this field if the JWT Signing Method was set to None.
diff --git a/docs/resources/cloudhub2_shared_space_deployment.md b/docs/resources/cloudhub2_shared_space_deployment.md
new file mode 100644
index 0000000..a5c1d99
--- /dev/null
+++ b/docs/resources/cloudhub2_shared_space_deployment.md
@@ -0,0 +1,344 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "anypoint_cloudhub2_shared_space_deployment Resource - terraform-provider-anypoint"
+subcategory: ""
+description: |-
+ Creates and manages a `deployment` of a mule app on Cloudhub v2 Shared-Space only.
+---
+
+# anypoint_cloudhub2_shared_space_deployment (Resource)
+
+Creates and manages a `deployment` of a mule app on Cloudhub v2 Shared-Space only.
+
+## Example Usage
+
+```terraform
+resource "anypoint_cloudhub2_shared_space_deployment" "deployment" {
+ org_id = var.root_org
+ env_id = var.env_id
+ name = "your-awesome-app"
+ application {
+ desired_state = "STARTED"
+ vcores = 0.1
+ object_store_v2_enabled = true
+ ref {
+ group_id = var.root_org
+ artifact_id = "your-awesome-app-artifact"
+ version = "1.0.0"
+ packaging = "jar"
+ }
+ configuration {
+ mule_agent_app_props_service {
+ properties = {
+ props1 = "value"
+ props2 = "value"
+ }
+ secure_properties = {
+ secure_props1 = "secret_value"
+ }
+ }
+ mule_agent_logging_service {
+ scope_logging_configurations {
+ scope = "mule.package"
+ log_level = "DEBUG"
+ }
+ }
+ }
+ }
+
+ target {
+ provider = "MC"
+ target_id = "cloudhub-us-east-1"
+ replicas = 1
+ deployment_settings {
+ clustered = false
+ jvm_args = ""
+ update_strategy = "rolling"
+ disable_am_log_forwarding = true
+ disable_external_log_forwarding = true
+ generate_default_public_url = true
+ runtime {
+ version = "4.7.0:20e-java8"
+ }
+ http {
+ inbound_last_mile_security = true
+ }
+ }
+ }
+}
+```
+
+
+## Schema
+
+### Required
+
+- `application` (Block List, Min: 1, Max: 1) The details of the application to deploy (see [below for nested schema](#nestedblock--application))
+- `env_id` (String) The environment where mule app is deployed.
+- `name` (String) The name of the deployed mule app.
+- `org_id` (String) The organization where the mule app is deployed.
+- `target` (Block List, Min: 1, Max: 1) The details of the target to perform the deployment on. (see [below for nested schema](#nestedblock--target))
+
+### Read-Only
+
+- `creation_date` (Number) The creation date of the mule app.
+- `desired_version` (String) The deployment desired version of the mule app.
+- `id` (String) The unique id of the mule app deployment in the platform.
+- `last_modified_date` (Number) The last modification date of the mule app.
+- `last_successful_version` (String) The last successfully deployed version
+- `replicas` (List of Object) Data of the mule app replicas (see [below for nested schema](#nestedatt--replicas))
+- `status` (String) The status of the mule app deployment
+
+
+### Nested Schema for `application`
+
+Required:
+
+- `configuration` (Block List, Min: 1, Max: 1) The configuration of the application. (see [below for nested schema](#nestedblock--application--configuration))
+- `ref` (Block List, Min: 1, Max: 1) The reference to the artifact on Exchange that is to be deployed on Cloudhub 2.0.
+ Please ensure the application's artifact is deployed on Exchange before using this resource on Cloudhub 2.0. (see [below for nested schema](#nestedblock--application--ref))
+- `vcores` (Number) The allocated virtual cores. Acceptable Values are: 0.1 / 0.2 / 0.5 / 1 / 1.5 / 2 / 2.5 / 3 / 3.5 / 4
+
+Optional:
+
+- `desired_state` (String) The desired state of the application.
+- `object_store_v2_enabled` (Boolean) Whether object store v2 is enabled.
+
+Read-Only:
+
+- `status` (String) The status of the application.
+
+
+### Nested Schema for `application.configuration`
+
+Required:
+
+- `mule_agent_app_props_service` (Block List, Min: 1, Max: 1) The mule app properties (see [below for nested schema](#nestedblock--application--configuration--mule_agent_app_props_service))
+
+Optional:
+
+- `mule_agent_logging_service` (Block List, Max: 1) The mule app logging props (see [below for nested schema](#nestedblock--application--configuration--mule_agent_logging_service))
+
+Read-Only:
+
+- `mule_agent_scheduling_service` (List of Object) The mule app scheduling (see [below for nested schema](#nestedatt--application--configuration--mule_agent_scheduling_service))
+
+
+### Nested Schema for `application.configuration.mule_agent_app_props_service`
+
+Optional:
+
+- `properties` (Map of String) The mule application properties.
+- `secure_properties` (Map of String) The mule application secured properties.
+
+Read-Only:
+
+- `application_name` (String) The application name
+
+
+
+### Nested Schema for `application.configuration.mule_agent_logging_service`
+
+Optional:
+
+- `scope_logging_configurations` (Block List) Additional log levels and categories to include in logs. (see [below for nested schema](#nestedblock--application--configuration--mule_agent_logging_service--scope_logging_configurations))
+
+Read-Only:
+
+- `artifact_name` (String) The application name.
+
+
+### Nested Schema for `application.configuration.mule_agent_logging_service.scope_logging_configurations`
+
+Required:
+
+- `log_level` (String) The application log level: INFO / DEBUG / WARNING / ERROR / FATAL
+- `scope` (String) The logging package scope
+
+
+
+
+### Nested Schema for `application.configuration.mule_agent_scheduling_service`
+
+Read-Only:
+
+- `application_name` (String)
+- `schedulers` (List of Object) (see [below for nested schema](#nestedobjatt--application--configuration--mule_agent_scheduling_service--schedulers))
+
+
+### Nested Schema for `application.configuration.mule_agent_scheduling_service.schedulers`
+
+Read-Only:
+
+- `enabled` (Boolean)
+- `expression` (String)
+- `flow_name` (String)
+- `frequency` (String)
+- `name` (String)
+- `start_delay` (String)
+- `time_unit` (String)
+- `time_zone` (String)
+- `type` (String)
+
+
+
+
+
+### Nested Schema for `application.ref`
+
+Required:
+
+- `artifact_id` (String) The artifactId of the application.
+- `group_id` (String) The groupId of the application.
+- `packaging` (String) The packaging of the application. Only 'jar' is supported.
+- `version` (String) The version of the application.
+
+
+
+
+### Nested Schema for `target`
+
+Required:
+
+- `deployment_settings` (Block List, Min: 1, Max: 1) The settings of the target for the deployment to perform. (see [below for nested schema](#nestedblock--target--deployment_settings))
+- `target_id` (String) The unique identifier of the target within Cloudhub 2.0.
+ Checkout the [documentation](https://docs.mulesoft.com/cloudhub-2/ch2-architecture#regions-and-dns-records) for more info
+
+Optional:
+
+- `provider` (String) The cloud provider the target belongs to.
+- `replicas` (Number) The number of replicas. Default is 1.
+
+
+### Nested Schema for `target.deployment_settings`
+
+Optional:
+
+- `autoscaling` (Block List, Max: 1) Use this object to provide CPU Based Horizontal Autoscaling configuration on deployment and redeployment operations. This object is optional.
+ If Autoscaling is disabled and the fields "minReplicas" and "maxReplicas" are provided, they must match the value of "target.replicas" field.
+ Learn more about Autoscaling [here](https://docs.mulesoft.com/cloudhub-2/ch2-configure-horizontal-autoscaling). (see [below for nested schema](#nestedblock--target--deployment_settings--autoscaling))
+- `clustered` (Boolean) Whether the application is deployed in clustered mode.
+- `disable_am_log_forwarding` (Boolean) Whether log forwarding is disabled.
+- `disable_external_log_forwarding` (Boolean) Whether the log forwarding is disabled.
+- `generate_default_public_url` (Boolean) Whether default public url should be generated.
+- `http` (Block List, Max: 1) The details about http inbound or outbound configuration (see [below for nested schema](#nestedblock--target--deployment_settings--http))
+- `jvm_args` (String) The java virtual machine arguments
+- `runtime` (Block List, Max: 1) The Mule app runtime version info. (see [below for nested schema](#nestedblock--target--deployment_settings--runtime))
+- `update_strategy` (String) The mule app deployment update strategy: rolling or recreate
+
+Read-Only:
+
+- `anypoint_monitoring_scope` (String) The anypoint monitoring scope
+- `persistent_object_store` (Boolean) Whether persistent object store is enabled. Only for RTF
+- `resources` (List of Object) The mule app allocated resources (see [below for nested schema](#nestedatt--target--deployment_settings--resources))
+- `sidecars` (List of Object) The mule app sidecars. (see [below for nested schema](#nestedatt--target--deployment_settings--sidecars))
+- `tracing_enabled` (Boolean) Whether the log tracing is enabled.
+
+
+### Nested Schema for `target.deployment_settings.autoscaling`
+
+Required:
+
+- `enabled` (Boolean) Enables or disables the Autoscaling feature. The possible values are: true or false.
+
+Optional:
+
+- `max_replicas` (Number) Set the maximum amount of replicas your application can scale to. The minimum accepted value is 2. The maximum is 32.
+- `min_replicas` (Number) Set the minimum amount of replicas for your deployment. The minimum accepted value is 1. The maximum is 3.
+
+
+
+### Nested Schema for `target.deployment_settings.http`
+
+Optional:
+
+- `inbound_last_mile_security` (Boolean) Last-mile security means that the connection between ingress and the actual Mule app will be HTTPS.
+
+Read-Only:
+
+- `inbound_forward_ssl_session` (Boolean) Whether to forward the ssl session. This option is disabled for shared-space.
+- `inbound_internal_url` (String) The inbound internal url.
+- `inbound_path_rewrite` (String) The inbound path rewrite. This option is disabled for shared-space.
+- `inbound_public_url` (String) The inbound public url. Setting the public url is disabled for shared-space.
+- `inbound_unique_id` (String) The inbound unique id.
+
+
+
+### Nested Schema for `target.deployment_settings.runtime`
+
+Required:
+
+- `version` (String) On deployment operations it can be set to:
+ - a full image version with tag (i.e "4.6.0:40e-java17"),
+ - a base version with a partial tag not indicating the java version (i.e. "4.6.0:40")
+ - or only a base version (i.e. "4.6.0").
+ Defaults to the latest image version.
+ This field has precedence over the legacy 'target.deploymentSettings.runtimeVersion'.
+ Learn more about Mule runtime release notes [here](https://docs.mulesoft.com/release-notes/runtime-fabric/runtime-fabric-runtimes-release-notes)
+
+Optional:
+
+- `java` (String) On deployment operations it can be set to one of:
+ - "8"
+ - "17"
+ Defaults to "8".
+ Learn more about Java support [here](https://docs.mulesoft.com/general/java-support).
+- `release_channel` (String) On deployment operations it can be set to one of:
+ - "LTS"
+ - "EDGE"
+ - "LEGACY".
+ Defaults to "EDGE". This field has precedence over the legacy 'target.deploymentSettings.runtimeReleaseChannel'.
+ Learn more on release channels [here](https://docs.mulesoft.com/release-notes/mule-runtime/lts-edge-release-cadence).
+
+
+
+### Nested Schema for `target.deployment_settings.resources`
+
+Read-Only:
+
+- `cpu_limit` (String)
+- `cpu_reserved` (String)
+- `memory_limit` (String)
+- `memory_reserved` (String)
+- `storage_limit` (String)
+- `storage_reserved` (String)
+
+
+
+### Nested Schema for `target.deployment_settings.sidecars`
+
+Read-Only:
+
+- `anypoint_monitoring_image` (String)
+- `anypoint_monitoring_resources_cpu_limit` (String)
+- `anypoint_monitoring_resources_cpu_reserved` (String)
+- `anypoint_monitoring_resources_memory_limit` (String)
+- `anypoint_monitoring_resources_memory_reserved` (String)
+
+
+
+
+
+### Nested Schema for `replicas`
+
+Read-Only:
+
+- `current_deployment_version` (String)
+- `deployment_location` (String)
+- `id` (String)
+- `reason` (String)
+- `state` (String)
+
+## Import
+
+Import is supported using the following syntax:
+
+```shell
+# In order for the import to work, you should provide an ID composed of the following:
+# {ORG_ID}/{ENV_ID}/{DEPLOYMENT_ID}
+
+terraform import \
+ -var-file params.tfvars.json \ #variables file
+ anypoint_cloudhub2_shared_space_deployment.deployment \ #resource name
+ aa1f55d6-213d-4f60-845c-201282484cd1/7074fcdd-9b23-4ae3-97e8-5db5f4adf17e/de32fc9d-6b25-4d6f-bd5e-cac32272b2f7 #resource ID
+```
diff --git a/docs/resources/fabrics.md b/docs/resources/fabrics.md
new file mode 100644
index 0000000..8db1ee1
--- /dev/null
+++ b/docs/resources/fabrics.md
@@ -0,0 +1,163 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "anypoint_fabrics Resource - terraform-provider-anypoint"
+subcategory: ""
+description: |-
+ Creates a `Runtime Fabrics` instance.
+---
+
+# anypoint_fabrics (Resource)
+
+Creates a `Runtime Fabrics` instance.
+
+## Example Usage
+
+```terraform
+resource "anypoint_fabrics" "fabrics" {
+ org_id = var.root_org
+ name = "terraform-eks-rtf"
+ region = "us-east-1"
+ vendor = "eks"
+}
+```
+
+
+## Schema
+
+### Required
+
+- `name` (String) The name of the fabrics
+- `org_id` (String) The organization id where the fabrics is defined.
+- `region` (String) The region where fabrics instance is hosted. Refer to the official documentation for the list of available regions.
+ The list of regions is available [here](https://docs.mulesoft.com/cloudhub-2/ch2-architecture#regions-and-dns-records).
+ Examples: us-east-1 / us-east-2
+- `vendor` (String) The vendor name of the kubernetes instance hosting fabrics. The following values are supported:
+ * eks: AWS Elastic Kubernetes Service
+ * aks: Azure Kubernetes Service
+	* gke: Google Kubernetes Engine
+ * ack: Alibaba Kubernetes Service
+ * openshift: Openshift
+ * rancher: Rancher
+
+### Read-Only
+
+- `activation_data` (String) The activation data to use during installation of fabrics on the kubernetes cluster. Only available when instance is created and not activated yet.
+- `app_scoped_log_forwarding` (Boolean) Whether app scoped log forwarding is active.
+- `available_upgrade_version` (String) The available upgrade version of fabrics.
+- `cluster_configuration_level` (String) The configuration level of the cluster (production or development).
+- `created_at` (Number) The creation date of the fabrics instance
+- `desired_version` (String) The desired version of fabrics.
+- `features` (List of Object) The features of this cluster. (see [below for nested schema](#nestedatt--features))
+- `id` (String) The unique id of this fabrics generated by the anypoint platform.
+- `ingress` (List of Object) The ingress configurations of this cluster. (see [below for nested schema](#nestedatt--ingress))
+- `is_helm_managed` (Boolean) Whether this cluster is managed by Helm.
+- `is_managed` (Boolean) Whether this cluster is managed.
+- `kubernetes_version` (String) The kubernetes version of the cluster.
+- `license_expiry_date` (Number) The expiry date of the license (timestamp).
+- `namespace` (String) The namespace where runtime fabrics is installed.
+- `nodes` (List of Object) The list of fabrics nodes. (see [below for nested schema](#nestedatt--nodes))
+- `seconds_since_heartbeat` (Number) The number of seconds since last heartbeat.
+- `status` (String) The status of the fabrics instance.
+- `upgrade` (Block List) The status of the fabrics. Only available when instance is created and not activated yet. This cannot be set by user, any value the user puts is ignored. (see [below for nested schema](#nestedblock--upgrade))
+- `vendor_metadata` (Map of String) The vendor metadata
+- `version` (String) The version of fabrics.
+
+
+### Nested Schema for `features`
+
+Read-Only:
+
+- `enhanced_security` (Boolean)
+- `persistent_store` (Boolean)
+
+
+
+### Nested Schema for `ingress`
+
+Read-Only:
+
+- `domains` (List of String)
+
+
+
+### Nested Schema for `nodes`
+
+Read-Only:
+
+- `allocated_limit_capacity` (List of Object) (see [below for nested schema](#nestedobjatt--nodes--allocated_limit_capacity))
+- `allocated_request_capacity` (List of Object) (see [below for nested schema](#nestedobjatt--nodes--allocated_request_capacity))
+- `capacity` (List of Object) (see [below for nested schema](#nestedobjatt--nodes--capacity))
+- `docker_version` (String)
+- `kubelet_version` (String)
+- `name` (String)
+- `role` (String)
+- `status` (List of Object) (see [below for nested schema](#nestedobjatt--nodes--status))
+- `uid` (String)
+
+
+### Nested Schema for `nodes.allocated_limit_capacity`
+
+Read-Only:
+
+- `cpu` (Number)
+- `cpu_millis` (Number)
+- `memory` (String)
+- `memory_mi` (Number)
+- `pods` (Number)
+
+
+
+### Nested Schema for `nodes.allocated_request_capacity`
+
+Read-Only:
+
+- `cpu` (Number)
+- `cpu_millis` (Number)
+- `memory` (String)
+- `memory_mi` (Number)
+- `pods` (Number)
+
+
+
+### Nested Schema for `nodes.capacity`
+
+Read-Only:
+
+- `cpu` (Number)
+- `cpu_millis` (Number)
+- `memory` (String)
+- `memory_mi` (Number)
+- `pods` (Number)
+
+
+
+### Nested Schema for `nodes.status`
+
+Read-Only:
+
+- `is_healthy` (Boolean)
+- `is_ready` (Boolean)
+- `is_schedulable` (Boolean)
+
+
+
+
+### Nested Schema for `upgrade`
+
+Read-Only:
+
+- `status` (String) The upgrade status.
+
+## Import
+
+Import is supported using the following syntax:
+
+```shell
+# In order for the import to work, you should provide an ID composed of the following:
+# {ORG_ID}/{FABRICS_ID}
+
+terraform import \
+ -var-file params.tfvars.json \ #variables file
+ anypoint_fabrics.rtf \ #resource name
+ aa1f55d6-213d-4f60-845c-201282484cd1/4c641268-3917-45b0-acb8-f7cb0c0318ab #resource ID
+```
diff --git a/docs/resources/fabrics_associations.md b/docs/resources/fabrics_associations.md
new file mode 100644
index 0000000..2a5e7c1
--- /dev/null
+++ b/docs/resources/fabrics_associations.md
@@ -0,0 +1,95 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "anypoint_fabrics_associations Resource - terraform-provider-anypoint"
+subcategory: ""
+description: |-
+ Manages `Runtime Fabrics` Environment associations.
+ NOTE: The fabrics will be associated with all sandbox environments in every available org when this resource is deleted.
+---
+
+# anypoint_fabrics_associations (Resource)
+
+Manages `Runtime Fabrics` Environment associations.
+ NOTE: The fabrics will be associated with all sandbox environments in every available org when this resource is deleted.
+
+## Example Usage
+
+```terraform
+resource "anypoint_fabrics_associations" "assoc" {
+ org_id = var.root_org
+ fabrics_id = "4c641268-3917-45b0-acb8-f7cb0c0318ab"
+
+ # Associate a specific environment in a specific org
+ associations {
+ env_id = "7074fcee-9b23-4ab6-97e8-5de5f4aef17d"
+ org_id = "aa1f00d6-213d-4f60-845b-207286484bd1"
+ }
+
+ # Associate all sandbox environments for all orgs
+ associations {
+ env_id = "sandbox"
+ org_id = "all"
+ }
+
+ # Associate all production environments for all orgs
+ associations {
+ env_id = "production"
+ org_id = "all"
+ }
+
+ # Associate all sandbox environments for a specific org
+ associations {
+ env_id = "sandbox"
+ org_id = "aa1f00d6-213d-4f60-845b-207286484bd1"
+ }
+
+ # Associate all environments for all orgs
+ associations {
+ env_id = "all"
+ org_id = "all"
+ }
+}
+```
+
+
+## Schema
+
+### Required
+
+- `associations` (Block Set, Min: 1) The list of environment associations to an instance of fabrics (see [below for nested schema](#nestedblock--associations))
+- `fabrics_id` (String) The unique id of the fabrics instance in the platform.
+- `org_id` (String) The organization id where the fabrics is hosted.
+
+### Optional
+
+- `last_updated` (String) The last time this resource has been updated locally.
+
+### Read-Only
+
+- `id` (String) The unique id of this fabrics generated by the anypoint platform.
+
+
+### Nested Schema for `associations`
+
+Required:
+
+- `env_id` (String) The environment to associate with fabrics.
+- `org_id` (String) The organization id to associate with fabrics.
+
+Read-Only:
+
+- `id` (String) The unique id of the fabrics instance in the platform.
+
+## Import
+
+Import is supported using the following syntax:
+
+```shell
+# In order for the import to work, you should provide an ID composed of the following:
+# {ORG_ID}/{ENV_ID}
+
+terraform import \
+ -var-file params.tfvars.json \ #variables file
+ anypoint_fabrics_associations.assoc \ #resource name
+ aa1f55d6-213d-4f60-845c-201282484cd1/4c641268-3917-45b0-acb8-f7cb0c0318ab #resource ID
+```
diff --git a/docs/resources/rtf_deployment.md b/docs/resources/rtf_deployment.md
new file mode 100644
index 0000000..a299d7b
--- /dev/null
+++ b/docs/resources/rtf_deployment.md
@@ -0,0 +1,360 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "anypoint_rtf_deployment Resource - terraform-provider-anypoint"
+subcategory: ""
+description: |-
+ Creates and manages a `deployment` of a mule app on Runtime Fabrics only.
+---
+
+# anypoint_rtf_deployment (Resource)
+
+Creates and manages a `deployment` of a mule app on Runtime Fabrics only.
+
+## Example Usage
+
+```terraform
+resource "anypoint_rtf_deployment" "deployment" {
+ org_id = var.root_org
+ env_id = var.env_id
+ name = "your-awesome-app"
+ application {
+ desired_state = "STARTED"
+ ref {
+ group_id = var.root_org
+ artifact_id = "your-artifact-id"
+ version = "1.0.2"
+ packaging = "jar"
+ }
+ configuration {
+ mule_agent_app_props_service {
+ properties = {
+ props1 = "value01"
+ props2 = "value02"
+ }
+ secure_properties = {
+ secure_props1 = "secret_value"
+ }
+ }
+ mule_agent_logging_service {
+ scope_logging_configurations {
+ scope = "mule.package"
+ log_level = "DEBUG"
+ }
+ }
+ }
+ }
+
+ target {
+ provider = "MC"
+ target_id = var.fabrics_id
+ replicas = 1
+ deployment_settings {
+ clustered = false
+ enforce_deploying_replicas_across_nodes = false
+ persistent_object_store = false
+ jvm_args = ""
+ update_strategy = "rolling"
+ disable_am_log_forwarding = false
+ disable_external_log_forwarding = false
+ generate_default_public_url = false
+ http {
+ inbound_public_url = "http://private.example.net/(.+),http://another.example.net/(.+)"
+ inbound_last_mile_security = true
+ inbound_forward_ssl_session = false
+ }
+ runtime {
+ version = "4.7.0:20e-java8"
+ }
+ resources {
+ cpu_reserved = "100m"
+ cpu_limit = "1000m"
+ memory_reserved = "1000Mi"
+ memory_limit = "1000Mi"
+ }
+ }
+ }
+}
+```
+
+
+## Schema
+
+### Required
+
+- `application` (Block List, Min: 1, Max: 1) The details of the application to deploy (see [below for nested schema](#nestedblock--application))
+- `env_id` (String) The environment where mule app is deployed.
+- `name` (String) The name of the deployed mule app.
+- `org_id` (String) The organization where the mule app is deployed.
+- `target` (Block List, Min: 1, Max: 1) The details of the target to perform the deployment on. (see [below for nested schema](#nestedblock--target))
+
+### Read-Only
+
+- `creation_date` (Number) The creation date of the mule app.
+- `desired_version` (String) The deployment desired version of the mule app.
+- `id` (String) The unique id of the mule app deployment in the platform.
+- `last_modified_date` (Number) The last modification date of the mule app.
+- `last_successful_version` (String) The last successfully deployed version
+- `replicas` (List of Object) Data of the mule app replicas (see [below for nested schema](#nestedatt--replicas))
+- `status` (String) The status of the mule app deployment
+
+
+### Nested Schema for `application`
+
+Required:
+
+- `configuration` (Block List, Min: 1, Max: 1) The configuration of the application. (see [below for nested schema](#nestedblock--application--configuration))
+- `ref` (Block List, Min: 1, Max: 1) The reference to the artifact on Exchange that is to be deployed on Runtime Fabrics.
+ Please ensure the application's artifact is deployed on Exchange before using this resource on Runtime Fabrics. (see [below for nested schema](#nestedblock--application--ref))
+
+Optional:
+
+- `desired_state` (String) The desired state of the application.
+
+Read-Only:
+
+- `object_store_v2_enabled` (Boolean) Whether object store v2 is enabled. Only for Cloudhub.
+- `status` (String) The status of the application.
+- `vcores` (Number) The allocated virtual cores.
+
+
+### Nested Schema for `application.configuration`
+
+Required:
+
+- `mule_agent_app_props_service` (Block List, Min: 1, Max: 1) The mule app properties (see [below for nested schema](#nestedblock--application--configuration--mule_agent_app_props_service))
+
+Optional:
+
+- `mule_agent_logging_service` (Block List, Max: 1) The mule app logging props (see [below for nested schema](#nestedblock--application--configuration--mule_agent_logging_service))
+
+Read-Only:
+
+- `mule_agent_scheduling_service` (List of Object) The mule app scheduling (see [below for nested schema](#nestedatt--application--configuration--mule_agent_scheduling_service))
+
+
+### Nested Schema for `application.configuration.mule_agent_app_props_service`
+
+Optional:
+
+- `properties` (Map of String) The mule application properties.
+- `secure_properties` (Map of String) The mule application secured properties.
+
+Read-Only:
+
+- `application_name` (String) The application name
+
+
+
+### Nested Schema for `application.configuration.mule_agent_logging_service`
+
+Optional:
+
+- `scope_logging_configurations` (Block List) Additional log levels and categories to include in logs. (see [below for nested schema](#nestedblock--application--configuration--mule_agent_logging_service--scope_logging_configurations))
+
+Read-Only:
+
+- `artifact_name` (String) The application name.
+
+
+### Nested Schema for `application.configuration.mule_agent_logging_service.scope_logging_configurations`
+
+Required:
+
+- `log_level` (String) The application log level: INFO / DEBUG / WARNING / ERROR / FATAL
+- `scope` (String) The logging package scope
+
+
+
+
+### Nested Schema for `application.configuration.mule_agent_scheduling_service`
+
+Read-Only:
+
+- `application_name` (String)
+- `schedulers` (List of Object) (see [below for nested schema](#nestedobjatt--application--configuration--mule_agent_scheduling_service--schedulers))
+
+
+### Nested Schema for `application.configuration.mule_agent_scheduling_service.schedulers`
+
+Read-Only:
+
+- `enabled` (Boolean)
+- `expression` (String)
+- `flow_name` (String)
+- `frequency` (String)
+- `name` (String)
+- `start_delay` (String)
+- `time_unit` (String)
+- `time_zone` (String)
+- `type` (String)
+
+
+
+
+
+### Nested Schema for `application.ref`
+
+Required:
+
+- `artifact_id` (String) The artifactId of the application.
+- `group_id` (String) The groupId of the application.
+- `packaging` (String) The packaging of the application. Only 'jar' is supported.
+- `version` (String) The version of the application.
+
+
+
+
+### Nested Schema for `target`
+
+Required:
+
+- `deployment_settings` (Block List, Min: 1, Max: 1) The settings of the target for the deployment to perform. (see [below for nested schema](#nestedblock--target--deployment_settings))
+- `target_id` (String) The unique identifier of the Runtime Fabrics target.
+
+Optional:
+
+- `provider` (String) The cloud provider the target belongs to.
+- `replicas` (Number) The number of replicas. Default is 1.
+
+
+### Nested Schema for `target.deployment_settings`
+
+Required:
+
+- `resources` (Block List, Min: 1, Max: 1) The mule app allocated resources. (see [below for nested schema](#nestedblock--target--deployment_settings--resources))
+
+Optional:
+
+- `autoscaling` (Block List, Max: 1) Use this object to provide CPU Based Horizontal Autoscaling configuration on deployment and redeployment operations. This object is optional.
+ If Autoscaling is disabled and the fields "minReplicas" and "maxReplicas" are provided, they must match the value of "target.replicas" field.
+ Learn more about Autoscaling [here](https://docs.mulesoft.com/cloudhub-2/ch2-configure-horizontal-autoscaling). (see [below for nested schema](#nestedblock--target--deployment_settings--autoscaling))
+- `clustered` (Boolean) Whether the application is deployed in clustered mode.
+- `disable_am_log_forwarding` (Boolean) Whether log forwarding is disabled.
+- `disable_external_log_forwarding` (Boolean) Whether the log forwarding is disabled.
+- `enforce_deploying_replicas_across_nodes` (Boolean) If true, forces the deployment of replicas across the RTF cluster. This option is only available for Runtime Fabrics.
+- `generate_default_public_url` (Boolean) Whether default public url should be generated.
+- `http` (Block List, Max: 1) The details about http inbound or outbound configuration (see [below for nested schema](#nestedblock--target--deployment_settings--http))
+- `jvm_args` (String) The java virtual machine arguments
+- `persistent_object_store` (Boolean) Whether persistent object store is enabled.
+- `runtime` (Block List, Max: 1) The Mule app runtime version info. (see [below for nested schema](#nestedblock--target--deployment_settings--runtime))
+- `update_strategy` (String) The mule app deployment update strategy: rolling or recreate
+
+Read-Only:
+
+- `anypoint_monitoring_scope` (String) The anypoint monitoring scope
+- `sidecars` (List of Object) The mule app sidecars. (see [below for nested schema](#nestedatt--target--deployment_settings--sidecars))
+- `tracing_enabled` (Boolean) Whether the log tracing is enabled.
+
+
+### Nested Schema for `target.deployment_settings.resources`
+
+Required:
+
+- `cpu_limit` (String) The CPU limit
+- `cpu_reserved` (String) The CPU reserved.
+- `memory_limit` (String) The memory limit
+- `memory_reserved` (String) The memory reserved.
+
+Read-Only:
+
+- `storage_limit` (String) The storage limit
+- `storage_reserved` (String) The storage reserved
+
+
+
+### Nested Schema for `target.deployment_settings.autoscaling`
+
+Required:
+
+- `enabled` (Boolean) Enables or disables the Autoscaling feature. The possible values are: true or false.
+
+Optional:
+
+- `max_replicas` (Number) Set the maximum amount of replicas your application can scale to. The minimum accepted value is 2. The maximum is 32.
+- `min_replicas` (Number) Set the minimum amount of replicas for your deployment. The minimum accepted value is 1. The maximum is 3.
+
+
+
+### Nested Schema for `target.deployment_settings.http`
+
+Optional:
+
+- `inbound_forward_ssl_session` (Boolean) Whether to forward the ssl session.
+- `inbound_last_mile_security` (Boolean) Last-mile security means that the connection between ingress and the actual Mule app will be HTTPS.
+- `inbound_public_url` (String) The ingress url(s).
+	If you need to use multiple ingress urls, separate them with commas.
+ example: http://example.mulesoft.terraform.net/(.+)
+
+Read-Only:
+
+- `inbound_internal_url` (String) The inbound internal url.
+- `inbound_path_rewrite` (String) The inbound path rewrite. This option is only available for Cloudhub 2.0 with private spaces
+- `inbound_unique_id` (String) The inbound unique id.
+
+
+
+### Nested Schema for `target.deployment_settings.runtime`
+
+Required:
+
+- `version` (String) On deployment operations it can be set to:
+ - a full image version with tag (i.e "4.6.0:40e-java17"),
+ - a base version with a partial tag not indicating the java version (i.e. "4.6.0:40")
+ - or only a base version (i.e. "4.6.0").
+ Defaults to the latest image version.
+ This field has precedence over the legacy 'target.deploymentSettings.runtimeVersion'.
+ Learn more about Mule runtime release notes [here](https://docs.mulesoft.com/release-notes/runtime-fabric/runtime-fabric-runtimes-release-notes)
+
+Optional:
+
+- `java` (String) On deployment operations it can be set to one of:
+ - "8"
+ - "17"
+ Defaults to "8".
+ Learn more about Java support [here](https://docs.mulesoft.com/general/java-support).
+- `release_channel` (String) On deployment operations it can be set to one of:
+ - "LTS"
+ - "EDGE"
+ - "LEGACY".
+ Defaults to "EDGE". This field has precedence over the legacy 'target.deploymentSettings.runtimeReleaseChannel'.
+ Learn more on release channels [here](https://docs.mulesoft.com/release-notes/mule-runtime/lts-edge-release-cadence).
+
+
+
+### Nested Schema for `target.deployment_settings.sidecars`
+
+Read-Only:
+
+- `anypoint_monitoring_image` (String)
+- `anypoint_monitoring_resources_cpu_limit` (String)
+- `anypoint_monitoring_resources_cpu_reserved` (String)
+- `anypoint_monitoring_resources_memory_limit` (String)
+- `anypoint_monitoring_resources_memory_reserved` (String)
+
+
+
+
+
+### Nested Schema for `replicas`
+
+Read-Only:
+
+- `current_deployment_version` (String)
+- `deployment_location` (String)
+- `id` (String)
+- `reason` (String)
+- `state` (String)
+
+## Import
+
+Import is supported using the following syntax:
+
+```shell
+# In order for the import to work, you should provide an ID composed of the following:
+# {ORG_ID}/{ENV_ID}/{DEPLOYMENT_ID}
+
+terraform import \
+ -var-file params.tfvars.json \ #variables file
+ anypoint_rtf_deployment.deployment \ #resource name
+ aa1f55d6-213d-4f60-845c-201282484cd1/7074fcdd-9b23-4ae3-97e8-5db5f4adf17e/de32fc9d-6b25-4d6f-bd5e-cac32272b2f7 #resource ID
+```
diff --git a/examples/data-sources/anypoint_app_deployment_v2/data-source.tf b/examples/data-sources/anypoint_app_deployment_v2/data-source.tf
new file mode 100644
index 0000000..037ce86
--- /dev/null
+++ b/examples/data-sources/anypoint_app_deployment_v2/data-source.tf
@@ -0,0 +1,5 @@
+data "anypoint_app_deployment_v2" "app" {
+ id = "de32fc9d-6b25-4d6f-bd5e-cac32272b2f7"
+ org_id = var.root_org
+ env_id = var.env_id
+}
diff --git a/examples/data-sources/anypoint_app_deployments_v2/data-source.tf b/examples/data-sources/anypoint_app_deployments_v2/data-source.tf
new file mode 100644
index 0000000..3403bcc
--- /dev/null
+++ b/examples/data-sources/anypoint_app_deployments_v2/data-source.tf
@@ -0,0 +1,4 @@
+data "anypoint_app_deployments_v2" "apps" {
+ org_id = var.root_org
+ env_id = var.env_id
+}
diff --git a/examples/data-sources/anypoint_fabrics/data-source.tf b/examples/data-sources/anypoint_fabrics/data-source.tf
new file mode 100644
index 0000000..cb43185
--- /dev/null
+++ b/examples/data-sources/anypoint_fabrics/data-source.tf
@@ -0,0 +1,4 @@
+data "anypoint_fabrics" "rtf" {
+ id = "YOUR_FABRICS_ID"
+ org_id = var.org_id
+}
diff --git a/examples/data-sources/anypoint_fabrics_associations/data-source.tf b/examples/data-sources/anypoint_fabrics_associations/data-source.tf
new file mode 100644
index 0000000..4d90f31
--- /dev/null
+++ b/examples/data-sources/anypoint_fabrics_associations/data-source.tf
@@ -0,0 +1,8 @@
+data "anypoint_fabrics_associations" "assoc" {
+ fabrics_id = "YOUR_FABRICS_ID"
+ org_id = var.root_org
+}
+
+output "associations" {
+ value = data.anypoint_fabrics_associations.assoc.associations
+}
\ No newline at end of file
diff --git a/examples/data-sources/anypoint_fabrics_health/data-source.tf b/examples/data-sources/anypoint_fabrics_health/data-source.tf
new file mode 100644
index 0000000..6dca979
--- /dev/null
+++ b/examples/data-sources/anypoint_fabrics_health/data-source.tf
@@ -0,0 +1,4 @@
+data "anypoint_fabrics_health" "health" {
+ fabrics_id = "YOUR_FABRICS_ID"
+ org_id = var.org_id
+}
diff --git a/examples/data-sources/anypoint_fabrics_helm_repo/data-source.tf b/examples/data-sources/anypoint_fabrics_helm_repo/data-source.tf
new file mode 100644
index 0000000..d5becc6
--- /dev/null
+++ b/examples/data-sources/anypoint_fabrics_helm_repo/data-source.tf
@@ -0,0 +1,3 @@
+data "anypoint_fabrics_helm_repo" "repo" {
+ org_id = var.org_id
+}
diff --git a/examples/data-sources/anypoint_fabrics_list/data-source.tf b/examples/data-sources/anypoint_fabrics_list/data-source.tf
new file mode 100644
index 0000000..b56c9ea
--- /dev/null
+++ b/examples/data-sources/anypoint_fabrics_list/data-source.tf
@@ -0,0 +1,7 @@
+data "anypoint_fabrics_list" "all" {
+ org_id = var.org_id
+}
+
+output "all" {
+ value = data.anypoint_fabrics_list.all.list
+}
\ No newline at end of file
diff --git a/examples/data-sources/anypoint_secretgroup/data-source.tf b/examples/data-sources/anypoint_secretgroup/data-source.tf
new file mode 100644
index 0000000..a4bc88f
--- /dev/null
+++ b/examples/data-sources/anypoint_secretgroup/data-source.tf
@@ -0,0 +1,5 @@
+data "anypoint_secretgroup" "secretgroup" {
+ id = "your_secretgroup_id"
+ org_id = var.org_id
+ env_id = var.env_id
+}
\ No newline at end of file
diff --git a/examples/resources/anypoint_cloudhub2_shared_space_deployment/import.sh b/examples/resources/anypoint_cloudhub2_shared_space_deployment/import.sh
new file mode 100644
index 0000000..3b0f7b1
--- /dev/null
+++ b/examples/resources/anypoint_cloudhub2_shared_space_deployment/import.sh
@@ -0,0 +1,7 @@
+# In order for the import to work, you should provide an ID composed of the following:
+# {ORG_ID}/{ENV_ID}/{DEPLOYMENT_ID}
+
+terraform import \
+ -var-file params.tfvars.json \ #variables file
+ anypoint_cloudhub2_shared_space_deployment.deployment \ #resource name
+ aa1f55d6-213d-4f60-845c-201282484cd1/7074fcdd-9b23-4ae3-97e8-5db5f4adf17e/de32fc9d-6b25-4d6f-bd5e-cac32272b2f7 #resource ID
diff --git a/examples/resources/anypoint_cloudhub2_shared_space_deployment/resource.tf b/examples/resources/anypoint_cloudhub2_shared_space_deployment/resource.tf
new file mode 100644
index 0000000..5c152c8
--- /dev/null
+++ b/examples/resources/anypoint_cloudhub2_shared_space_deployment/resource.tf
@@ -0,0 +1,53 @@
+resource "anypoint_cloudhub2_shared_space_deployment" "deployment" {
+ org_id = var.root_org
+ env_id = var.env_id
+ name = "your-awesome-app"
+ application {
+ desired_state = "STARTED"
+ vcores = 0.1
+ object_store_v2_enabled = true
+ ref {
+ group_id = var.root_org
+ artifact_id = "your-awesome-app-artifact"
+ version = "1.0.0"
+ packaging = "jar"
+ }
+ configuration {
+ mule_agent_app_props_service {
+ properties = {
+ props1 = "value"
+ props2 = "value"
+ }
+ secure_properties = {
+ secure_props1 = "secret_value"
+ }
+ }
+ mule_agent_logging_service {
+ scope_logging_configurations {
+ scope = "mule.package"
+ log_level = "DEBUG"
+ }
+ }
+ }
+ }
+
+ target {
+ provider = "MC"
+ target_id = "cloudhub-us-east-1"
+ replicas = 1
+ deployment_settings {
+ clustered = false
+ jvm_args = ""
+ update_strategy = "rolling"
+ disable_am_log_forwarding = true
+ disable_external_log_forwarding = true
+ generate_default_public_url = true
+ runtime {
+ version = "4.7.0:20e-java8"
+ }
+ http {
+ inbound_last_mile_security = true
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/examples/resources/anypoint_cloudhub2_shared_space_deployment/test.auto.tfvars b/examples/resources/anypoint_cloudhub2_shared_space_deployment/test.auto.tfvars
new file mode 100644
index 0000000..5aad57f
--- /dev/null
+++ b/examples/resources/anypoint_cloudhub2_shared_space_deployment/test.auto.tfvars
@@ -0,0 +1,3 @@
+root_org = "aa1f55d6-213d-4f60-845c-207286484cd1"
+owner_id = "18f23771-c78a-4be2-af8f-1bae66f43942"
+
diff --git a/examples/resources/anypoint_cloudhub2_shared_space_deployment/test.tf b/examples/resources/anypoint_cloudhub2_shared_space_deployment/test.tf
new file mode 100644
index 0000000..7611598
--- /dev/null
+++ b/examples/resources/anypoint_cloudhub2_shared_space_deployment/test.tf
@@ -0,0 +1,29 @@
+variable "root_org" {
+ default = "xx1f55d6-213d-4f60-845c-207286484cd1"
+}
+
+variable "owner_id" {
+ default = "18f23771-c78a-4be2-af8f-1bae66f43942"
+}
+
+resource "anypoint_bg" "bg" {
+ name = "TEST_BG_TF"
+ parent_organization_id = var.root_org
+ owner_id = var.owner_id
+ entitlements_createsuborgs = true
+ entitlements_createenvironments = true
+ entitlements_globaldeployment = true
+ entitlements_vcoresproduction_assigned = 0
+ entitlements_vcoressandbox_assigned = 0
+ entitlements_vcoresdesign_assigned = 0
+ entitlements_staticips_assigned = 0
+ entitlements_vpcs_assigned = 1
+ entitlements_loadbalancer_assigned = 0
+ entitlements_vpns_assigned = 1
+}
+
+resource "anypoint_env" "env" {
+ org_id = anypoint_bg.bg.id # environment related business group
+ name = "DEV" # environment name
+ type = "sandbox" # environment type : sandbox/production
+}
diff --git a/examples/resources/anypoint_fabrics/import.sh b/examples/resources/anypoint_fabrics/import.sh
new file mode 100644
index 0000000..e069086
--- /dev/null
+++ b/examples/resources/anypoint_fabrics/import.sh
@@ -0,0 +1,7 @@
+# In order for the import to work, you should provide an ID composed of the following:
+# {ORG_ID}/{FABRICS_ID}
+
+terraform import \
+ -var-file params.tfvars.json \ #variables file
+ anypoint_fabrics.rtf \ #resource name
+ aa1f55d6-213d-4f60-845c-201282484cd1/4c641268-3917-45b0-acb8-f7cb0c0318ab #resource ID
diff --git a/examples/resources/anypoint_fabrics/resource.tf b/examples/resources/anypoint_fabrics/resource.tf
new file mode 100644
index 0000000..e477981
--- /dev/null
+++ b/examples/resources/anypoint_fabrics/resource.tf
@@ -0,0 +1,6 @@
+resource "anypoint_fabrics" "fabrics" {
+ org_id = var.root_org
+ name = "terraform-eks-rtf"
+ region = "us-east-1"
+ vendor = "eks"
+}
diff --git a/examples/resources/anypoint_fabrics/test.auto.tfvars b/examples/resources/anypoint_fabrics/test.auto.tfvars
new file mode 100644
index 0000000..29641aa
--- /dev/null
+++ b/examples/resources/anypoint_fabrics/test.auto.tfvars
@@ -0,0 +1,4 @@
+root_org = "aa1f55d6-213d-4f60-845c-207286484cd1"
+owner_id = "18f23771-c78a-4be2-af8f-1bae66f43942"
+
+
diff --git a/examples/resources/anypoint_fabrics/test.tf b/examples/resources/anypoint_fabrics/test.tf
new file mode 100644
index 0000000..b2a05f4
--- /dev/null
+++ b/examples/resources/anypoint_fabrics/test.tf
@@ -0,0 +1,23 @@
+variable "root_org" {
+ default = "xx1f55d6-213d-4f60-845c-207286484cd1"
+}
+
+variable "owner_id" {
+ default = "18f23771-c78a-4be2-af8f-1bae66f43942"
+}
+
+resource "anypoint_bg" "bg" {
+ name = "TEST_BG_TF"
+ parent_organization_id = var.root_org
+ owner_id = var.owner_id
+ entitlements_createsuborgs = true
+ entitlements_createenvironments = true
+ entitlements_globaldeployment = true
+ entitlements_vcoresproduction_assigned = 0
+ entitlements_vcoressandbox_assigned = 0
+ entitlements_vcoresdesign_assigned = 0
+ entitlements_staticips_assigned = 0
+ entitlements_vpcs_assigned = 1
+ entitlements_loadbalancer_assigned = 0
+ entitlements_vpns_assigned = 1
+}
\ No newline at end of file
diff --git a/examples/resources/anypoint_fabrics_associations/import.sh b/examples/resources/anypoint_fabrics_associations/import.sh
new file mode 100644
index 0000000..bf3a697
--- /dev/null
+++ b/examples/resources/anypoint_fabrics_associations/import.sh
@@ -0,0 +1,7 @@
+# In order for the import to work, you should provide an ID composed of the following:
+# {ORG_ID}/{FABRICS_ID}
+
+terraform import \
+ -var-file params.tfvars.json \ #variables file
+ anypoint_fabrics_associations.assoc \ #resource name
+ aa1f55d6-213d-4f60-845c-201282484cd1/4c641268-3917-45b0-acb8-f7cb0c0318ab #resource ID
diff --git a/examples/resources/anypoint_fabrics_associations/resource.tf b/examples/resources/anypoint_fabrics_associations/resource.tf
new file mode 100644
index 0000000..dfe4682
--- /dev/null
+++ b/examples/resources/anypoint_fabrics_associations/resource.tf
@@ -0,0 +1,34 @@
+resource "anypoint_fabrics_associations" "assoc" {
+ org_id = var.root_org
+ fabrics_id = "4c641268-3917-45b0-acb8-f7cb0c0318ab"
+
+ # Associate a specific environment in a specific org
+ associations {
+ env_id = "7074fcee-9b23-4ab6-97e8-5de5f4aef17d"
+ org_id = "aa1f00d6-213d-4f60-845b-207286484bd1"
+ }
+
+ # Associate all sandbox environments for all orgs
+ associations {
+ env_id = "sandbox"
+ org_id = "all"
+ }
+
+ # Associate all production environments for all orgs
+ associations {
+ env_id = "production"
+ org_id = "all"
+ }
+
+ # Associate all sandbox environments for a specific org
+ associations {
+ env_id = "sandbox"
+ org_id = "aa1f00d6-213d-4f60-845b-207286484bd1"
+ }
+
+ # Associate all environments for all orgs
+ associations {
+ env_id = "all"
+ org_id = "all"
+ }
+}
diff --git a/examples/resources/anypoint_fabrics_associations/test.auto.tfvars b/examples/resources/anypoint_fabrics_associations/test.auto.tfvars
new file mode 100644
index 0000000..29641aa
--- /dev/null
+++ b/examples/resources/anypoint_fabrics_associations/test.auto.tfvars
@@ -0,0 +1,4 @@
+root_org = "aa1f55d6-213d-4f60-845c-207286484cd1"
+owner_id = "18f23771-c78a-4be2-af8f-1bae66f43942"
+
+
diff --git a/examples/resources/anypoint_fabrics_associations/test.tf b/examples/resources/anypoint_fabrics_associations/test.tf
new file mode 100644
index 0000000..b2a05f4
--- /dev/null
+++ b/examples/resources/anypoint_fabrics_associations/test.tf
@@ -0,0 +1,23 @@
+variable "root_org" {
+ default = "xx1f55d6-213d-4f60-845c-207286484cd1"
+}
+
+variable "owner_id" {
+ default = "18f23771-c78a-4be2-af8f-1bae66f43942"
+}
+
+resource "anypoint_bg" "bg" {
+ name = "TEST_BG_TF"
+ parent_organization_id = var.root_org
+ owner_id = var.owner_id
+ entitlements_createsuborgs = true
+ entitlements_createenvironments = true
+ entitlements_globaldeployment = true
+ entitlements_vcoresproduction_assigned = 0
+ entitlements_vcoressandbox_assigned = 0
+ entitlements_vcoresdesign_assigned = 0
+ entitlements_staticips_assigned = 0
+ entitlements_vpcs_assigned = 1
+ entitlements_loadbalancer_assigned = 0
+ entitlements_vpns_assigned = 1
+}
\ No newline at end of file
diff --git a/examples/resources/anypoint_rtf_deployment/import.sh b/examples/resources/anypoint_rtf_deployment/import.sh
new file mode 100644
index 0000000..2fc9e99
--- /dev/null
+++ b/examples/resources/anypoint_rtf_deployment/import.sh
@@ -0,0 +1,7 @@
+# In order for the import to work, you should provide an ID composed of the following:
+# {ORG_ID}/{ENV_ID}/{DEPLOYMENT_ID}
+
+terraform import \
+ -var-file params.tfvars.json \ #variables file
+ anypoint_rtf_deployment.deployment \ #resource name
+ aa1f55d6-213d-4f60-845c-201282484cd1/7074fcdd-9b23-4ae3-97e8-5db5f4adf17e/de32fc9d-6b25-4d6f-bd5e-cac32272b2f7 #resource ID
diff --git a/examples/resources/anypoint_rtf_deployment/resource.tf b/examples/resources/anypoint_rtf_deployment/resource.tf
new file mode 100644
index 0000000..884d304
--- /dev/null
+++ b/examples/resources/anypoint_rtf_deployment/resource.tf
@@ -0,0 +1,61 @@
+resource "anypoint_rtf_deployment" "deployment" {
+ org_id = var.root_org
+ env_id = var.env_id
+ name = "your-awesome-app"
+ application {
+ desired_state = "STARTED"
+ ref {
+ group_id = var.root_org
+ artifact_id = "your-artifact-id"
+ version = "1.0.2"
+ packaging = "jar"
+ }
+ configuration {
+ mule_agent_app_props_service {
+ properties = {
+ props1 = "value01"
+ props2 = "value02"
+ }
+ secure_properties = {
+ secure_props1 = "secret_value"
+ }
+ }
+ mule_agent_logging_service {
+ scope_logging_configurations {
+ scope = "mule.package"
+ log_level = "DEBUG"
+ }
+ }
+ }
+ }
+
+ target {
+ provider = "MC"
+ target_id = var.fabrics_id
+ replicas = 1
+ deployment_settings {
+ clustered = false
+ enforce_deploying_replicas_across_nodes = false
+ persistent_object_store = false
+ jvm_args = ""
+ update_strategy = "rolling"
+ disable_am_log_forwarding = false
+ disable_external_log_forwarding = false
+ generate_default_public_url = false
+ http {
+ inbound_public_url = "http://private.example.net/(.+),http://another.example.net/(.+)"
+ inbound_last_mile_security = true
+ inbound_forward_ssl_session = false
+ }
+ runtime {
+ version = "4.7.0:20e-java8"
+ }
+ resources {
+ cpu_reserved = "100m"
+ cpu_limit = "1000m"
+ memory_reserved = "1000Mi"
+ memory_limit = "1000Mi"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/examples/resources/anypoint_rtf_deployment/test.auto.tfvars b/examples/resources/anypoint_rtf_deployment/test.auto.tfvars
new file mode 100644
index 0000000..5aad57f
--- /dev/null
+++ b/examples/resources/anypoint_rtf_deployment/test.auto.tfvars
@@ -0,0 +1,3 @@
+root_org = "aa1f55d6-213d-4f60-845c-207286484cd1"
+owner_id = "18f23771-c78a-4be2-af8f-1bae66f43942"
+
diff --git a/examples/resources/anypoint_rtf_deployment/test.tf b/examples/resources/anypoint_rtf_deployment/test.tf
new file mode 100644
index 0000000..53eed82
--- /dev/null
+++ b/examples/resources/anypoint_rtf_deployment/test.tf
@@ -0,0 +1,36 @@
+variable "root_org" {
+ default = "xx1f55d6-213d-4f60-845c-207286484cd1"
+}
+
+variable "owner_id" {
+ default = "18f23771-c78a-4be2-af8f-1bae66f43942"
+}
+
+resource "anypoint_bg" "bg" {
+ name = "TEST_BG_TF"
+ parent_organization_id = var.root_org
+ owner_id = var.owner_id
+ entitlements_createsuborgs = true
+ entitlements_createenvironments = true
+ entitlements_globaldeployment = true
+ entitlements_vcoresproduction_assigned = 0
+ entitlements_vcoressandbox_assigned = 0
+ entitlements_vcoresdesign_assigned = 0
+ entitlements_staticips_assigned = 0
+ entitlements_vpcs_assigned = 1
+ entitlements_loadbalancer_assigned = 0
+ entitlements_vpns_assigned = 1
+}
+
+resource "anypoint_env" "env" {
+ org_id = anypoint_bg.bg.id # environment related business group
+ name = "DEV" # environment name
+ type = "sandbox" # environment type : sandbox/production
+}
+
+resource "anypoint_fabrics" "fabrics" {
+ org_id = var.root_org
+ name = "your-awesome-eks-fabrics"
+ region = "us-east-1"
+ vendor = "eks"
+}
\ No newline at end of file
diff --git a/go.mod b/go.mod
index 30b8864..82b8f00 100644
--- a/go.mod
+++ b/go.mod
@@ -11,6 +11,7 @@ require (
github.com/mulesoft-anypoint/anypoint-client-go/apim v0.1.0
github.com/mulesoft-anypoint/anypoint-client-go/apim_policy v0.0.1
github.com/mulesoft-anypoint/anypoint-client-go/apim_upstream v0.0.1
+ github.com/mulesoft-anypoint/anypoint-client-go/application_manager_v2 v0.1.0
github.com/mulesoft-anypoint/anypoint-client-go/authorization v0.3.0
github.com/mulesoft-anypoint/anypoint-client-go/connected_app v1.1.1
github.com/mulesoft-anypoint/anypoint-client-go/dlb v0.5.0
@@ -20,6 +21,7 @@ require (
github.com/mulesoft-anypoint/anypoint-client-go/org v0.4.0
github.com/mulesoft-anypoint/anypoint-client-go/role v0.2.0
github.com/mulesoft-anypoint/anypoint-client-go/rolegroup v0.2.0
+ github.com/mulesoft-anypoint/anypoint-client-go/rtf v0.1.0
github.com/mulesoft-anypoint/anypoint-client-go/secretgroup v0.1.0
github.com/mulesoft-anypoint/anypoint-client-go/secretgroup_certificate v0.1.0
github.com/mulesoft-anypoint/anypoint-client-go/secretgroup_crl_distributor_configs v0.1.0
@@ -43,7 +45,7 @@ require (
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
- github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 // indirect
+ github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320
github.com/hashicorp/go-hclog v1.2.1 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-plugin v1.4.4 // indirect
diff --git a/go.sum b/go.sum
index cc460d1..1caef90 100644
--- a/go.sum
+++ b/go.sum
@@ -207,6 +207,8 @@ github.com/mulesoft-anypoint/anypoint-client-go/apim_policy v0.0.1 h1:ilSNrRBIBp
github.com/mulesoft-anypoint/anypoint-client-go/apim_policy v0.0.1/go.mod h1:aCSab2+icg00k8ur845GuxfR9MiS2KPEYumO86pauik=
github.com/mulesoft-anypoint/anypoint-client-go/apim_upstream v0.0.1 h1:EAn805cAB8REdbuOVzER/n8F7t8yXMLfJIfyGq94qQ0=
github.com/mulesoft-anypoint/anypoint-client-go/apim_upstream v0.0.1/go.mod h1:RC9wgWhUwqB4ZKWZYE9R/NfFz1yCoss9bJDdTDPYQhI=
+github.com/mulesoft-anypoint/anypoint-client-go/application_manager_v2 v0.1.0 h1:NDuGf1JJZlBdJ4Av9YDSZgiZGtJGXPjzwBha4qMtUFM=
+github.com/mulesoft-anypoint/anypoint-client-go/application_manager_v2 v0.1.0/go.mod h1:aUuFZxGl5fO6FIB9KdO+wE+TrljdcHcjpxMqHlWJ21g=
github.com/mulesoft-anypoint/anypoint-client-go/authorization v0.3.0 h1:9CmX/n8KHR0DpK0WBYt4kO8youq2TYZ8r3XLqmRtJFY=
github.com/mulesoft-anypoint/anypoint-client-go/authorization v0.3.0/go.mod h1:V5QUMbu/p3eKOCwjpkhEH4uYDEg3f0/u9H8PLz1R060=
github.com/mulesoft-anypoint/anypoint-client-go/connected_app v1.1.1 h1:435i7VpcaUIxQzWkvfEDSfGGoqYvGA0L5Ri7ZBOWybk=
@@ -225,6 +227,8 @@ github.com/mulesoft-anypoint/anypoint-client-go/role v0.2.0 h1:q5TddUXaFrY7Vs/7A
github.com/mulesoft-anypoint/anypoint-client-go/role v0.2.0/go.mod h1:igRAIoG0Zo+SVoxRwGxxeAQEgUiz1efQ+n1JfeFV/b0=
github.com/mulesoft-anypoint/anypoint-client-go/rolegroup v0.2.0 h1:1TzIdYGZpvaA8/6v7RM+c2ryWhcMhL2ZK/uLe1neypk=
github.com/mulesoft-anypoint/anypoint-client-go/rolegroup v0.2.0/go.mod h1:4l1nfYhuDY+FUqCfMRVO9/lEkMqvXb6w0ZCX11ZUSyA=
+github.com/mulesoft-anypoint/anypoint-client-go/rtf v0.1.0 h1:UurvOXBdHB/WVb9twcs1K6RDL3xzu0HEicg/zIwEXzs=
+github.com/mulesoft-anypoint/anypoint-client-go/rtf v0.1.0/go.mod h1:yJb5G+9TBG0x+dsOmyq4b/8/vIyfrlj/EkWH7Xi/v28=
github.com/mulesoft-anypoint/anypoint-client-go/secretgroup v0.1.0 h1:KsQJV6GeenYvdJp/oz+45fdzReSH0bqQIuMepHCxG/8=
github.com/mulesoft-anypoint/anypoint-client-go/secretgroup v0.1.0/go.mod h1:KxUtpYEcBqgnWJiyWdTOHF5DJ4jaoSBE7qNBCYsPKb4=
github.com/mulesoft-anypoint/anypoint-client-go/secretgroup_certificate v0.1.0 h1:P1R+zk7588zo/IuvkR8lbwrMAvwtZBSWzta1WvgCL40=