chore: update references in the crates
The versioned module is imported rather than the individual structs and enums (when there is no conflict, e.g. when also importing a versioned shared struct), so that usages show the version explicitly.

There might be times when this isn't possible; for example, once structs and enums are versioned in stackable-operator, there could be multiple modules with the same name.

In this case, user-info-fetcher is also versioned with v1alpha1, so it is referred to as user_info_fetcher::v1alpha1 in crd/mod.rs so as not to conflict with the crd module's own v1alpha1.
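
For illustration, a minimal sketch of the resulting import style. The paths and types are the ones that appear in the diff below; the helper functions themselves are hypothetical and only show how the version stays visible at each usage site.

    use stackable_opa_operator::crd::{user_info_fetcher, v1alpha1};

    // The CRD version is spelled out wherever the type is used.
    fn server_role() -> v1alpha1::OpaRole {
        v1alpha1::OpaRole::Server
    }

    // Nested versioned modules are disambiguated by their parent module,
    // so crd::v1alpha1 and user_info_fetcher::v1alpha1 can coexist.
    fn backend_is_configured(backend: &user_info_fetcher::v1alpha1::Backend) -> bool {
        !matches!(backend, user_info_fetcher::v1alpha1::Backend::None {})
    }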
NickLarsenNZ committed Dec 5, 2024
1 parent: 19d9328 · commit: e9d201c
Showing 8 changed files with 104 additions and 83 deletions.
97 changes: 57 additions & 40 deletions rust/operator-binary/src/controller.rs
@@ -10,8 +10,7 @@ use serde::{Deserialize, Serialize};
use serde_json::json;
use snafu::{OptionExt, ResultExt, Snafu};
use stackable_opa_operator::crd::{
user_info_fetcher, Container, OpaCluster, OpaClusterStatus, OpaConfig, OpaRole, APP_NAME,
DEFAULT_SERVER_GRACEFUL_SHUTDOWN_TIMEOUT, OPERATOR_NAME,
user_info_fetcher, v1alpha1, APP_NAME, DEFAULT_SERVER_GRACEFUL_SHUTDOWN_TIMEOUT, OPERATOR_NAME,
};
use stackable_operator::{
builder::{
@@ -175,31 +174,31 @@ pub enum Error {
#[snafu(display("failed to apply Service for [{rolegroup}]"))]
ApplyRoleGroupService {
source: stackable_operator::cluster_resources::Error,
rolegroup: RoleGroupRef<OpaCluster>,
rolegroup: RoleGroupRef<v1alpha1::OpaCluster>,
},

#[snafu(display("failed to build ConfigMap for [{rolegroup}]"))]
BuildRoleGroupConfig {
source: stackable_operator::builder::configmap::Error,
rolegroup: RoleGroupRef<OpaCluster>,
rolegroup: RoleGroupRef<v1alpha1::OpaCluster>,
},

#[snafu(display("failed to apply ConfigMap for [{rolegroup}]"))]
ApplyRoleGroupConfig {
source: stackable_operator::cluster_resources::Error,
rolegroup: RoleGroupRef<OpaCluster>,
rolegroup: RoleGroupRef<v1alpha1::OpaCluster>,
},

#[snafu(display("failed to apply DaemonSet for [{rolegroup}]"))]
ApplyRoleGroupDaemonSet {
source: stackable_operator::cluster_resources::Error,
rolegroup: RoleGroupRef<OpaCluster>,
rolegroup: RoleGroupRef<v1alpha1::OpaCluster>,
},

#[snafu(display("failed to apply patch for DaemonSet for [{rolegroup}]"))]
ApplyPatchRoleGroupDaemonSet {
source: stackable_operator::client::Error,
rolegroup: RoleGroupRef<OpaCluster>,
rolegroup: RoleGroupRef<v1alpha1::OpaCluster>,
},

#[snafu(display("failed to patch service account"))]
@@ -388,7 +387,7 @@ pub struct OpaClusterConfigDecisionLog {
}

pub async fn reconcile_opa(
opa: Arc<DeserializeGuard<OpaCluster>>,
opa: Arc<DeserializeGuard<v1alpha1::OpaCluster>>,
ctx: Arc<Ctx>,
) -> Result<Action> {
tracing::info!("Starting reconcile");
@@ -404,7 +403,7 @@ pub async fn reconcile_opa(
.spec
.image
.resolve(DOCKER_IMAGE_BASE_NAME, crate::built_info::PKG_VERSION);
let opa_role = OpaRole::Server;
let opa_role = v1alpha1::OpaRole::Server;

let mut cluster_resources = ClusterResources::new(
APP_NAME,
@@ -563,7 +562,7 @@ pub async fn reconcile_opa(
let cluster_operation_cond_builder =
ClusterOperationsConditionBuilder::new(&opa.spec.cluster_operation);

let status = OpaClusterStatus {
let status = v1alpha1::OpaClusterStatus {
conditions: compute_conditions(opa, &[&ds_cond_builder, &cluster_operation_cond_builder]),
};

@@ -583,10 +582,10 @@ pub async fn reconcile_opa(
/// The server-role service is the primary endpoint that should be used by clients that do not perform internal load balancing,
/// including targets outside of the cluster.
pub fn build_server_role_service(
opa: &OpaCluster,
opa: &v1alpha1::OpaCluster,
resolved_product_image: &ResolvedProductImage,
) -> Result<Service> {
let role_name = OpaRole::Server.to_string();
let role_name = v1alpha1::OpaRole::Server.to_string();
let role_svc_name = opa
.server_role_service_name()
.context(RoleServiceNameNotFoundSnafu)?;
@@ -632,9 +631,9 @@ pub fn build_server_role_service(
///
/// This is mostly useful for internal communication between peers, or for clients that perform client-side load balancing.
fn build_rolegroup_service(
opa: &OpaCluster,
opa: &v1alpha1::OpaCluster,
resolved_product_image: &ResolvedProductImage,
rolegroup: &RoleGroupRef<OpaCluster>,
rolegroup: &RoleGroupRef<v1alpha1::OpaCluster>,
) -> Result<Service> {
let prometheus_label =
Label::try_from(("prometheus.io/scrape", "true")).context(BuildLabelSnafu)?;
@@ -677,10 +676,10 @@ fn build_rolegroup_service(

/// The rolegroup [`ConfigMap`] configures the rolegroup based on the configuration given by the administrator
fn build_server_rolegroup_config_map(
opa: &OpaCluster,
opa: &v1alpha1::OpaCluster,
resolved_product_image: &ResolvedProductImage,
rolegroup: &RoleGroupRef<OpaCluster>,
merged_config: &OpaConfig,
rolegroup: &RoleGroupRef<v1alpha1::OpaCluster>,
merged_config: &v1alpha1::OpaConfig,
vector_aggregator_address: Option<&str>,
) -> Result<ConfigMap> {
let mut cm_builder = ConfigMapBuilder::new();
@@ -736,12 +735,12 @@ fn build_server_rolegroup_config_map(
/// policy queries (which are often chained in serial, and block other tasks in the products).
#[allow(clippy::too_many_arguments)]
fn build_server_rolegroup_daemonset(
opa: &OpaCluster,
opa: &v1alpha1::OpaCluster,
resolved_product_image: &ResolvedProductImage,
opa_role: &OpaRole,
rolegroup_ref: &RoleGroupRef<OpaCluster>,
opa_role: &v1alpha1::OpaRole,
rolegroup_ref: &RoleGroupRef<v1alpha1::OpaCluster>,
server_config: &HashMap<PropertyNameKind, BTreeMap<String, String>>,
merged_config: &OpaConfig,
merged_config: &v1alpha1::OpaConfig,
opa_bundle_builder_image: &str,
user_info_fetcher_image: &str,
service_account: &ServiceAccount,
@@ -764,15 +763,15 @@ fn build_server_rolegroup_daemonset(

let mut pb = PodBuilder::new();

let prepare_container_name = Container::Prepare.to_string();
let prepare_container_name = v1alpha1::Container::Prepare.to_string();
let mut cb_prepare =
ContainerBuilder::new(&prepare_container_name).context(IllegalContainerNameSnafu)?;

let bundle_builder_container_name = Container::BundleBuilder.to_string();
let bundle_builder_container_name = v1alpha1::Container::BundleBuilder.to_string();
let mut cb_bundle_builder =
ContainerBuilder::new(&bundle_builder_container_name).context(IllegalContainerNameSnafu)?;

let opa_container_name = Container::Opa.to_string();
let opa_container_name = v1alpha1::Container::Opa.to_string();
let mut cb_opa =
ContainerBuilder::new(&opa_container_name).context(IllegalContainerNameSnafu)?;

@@ -967,9 +966,9 @@ fn build_server_rolegroup_daemonset(
);

match &user_info.backend {
user_info_fetcher::Backend::None {} => {}
user_info_fetcher::Backend::ExperimentalXfscAas(_) => {}
user_info_fetcher::Backend::ActiveDirectory(ad) => {
user_info_fetcher::v1alpha1::Backend::None {} => {}
user_info_fetcher::v1alpha1::Backend::ExperimentalXfscAas(_) => {}
user_info_fetcher::v1alpha1::Backend::ActiveDirectory(ad) => {
pb.add_volume(
SecretClassVolume::new(
ad.kerberos_secret_class_name.clone(),
@@ -1003,7 +1002,7 @@ fn build_server_rolegroup_daemonset(
.add_volumes_and_mounts(&mut pb, vec![&mut cb_user_info_fetcher])
.context(UserInfoFetcherTlsVolumeAndMountsSnafu)?;
}
user_info_fetcher::Backend::Keycloak(keycloak) => {
user_info_fetcher::v1alpha1::Backend::Keycloak(keycloak) => {
pb.add_volume(
VolumeBuilder::new(USER_INFO_FETCHER_CREDENTIALS_VOLUME_NAME)
.secret(SecretVolumeSource {
@@ -1035,7 +1034,10 @@ fn build_server_rolegroup_daemonset(
resolved_product_image,
CONFIG_VOLUME_NAME,
LOG_VOLUME_NAME,
merged_config.logging.containers.get(&Container::Vector),
merged_config
.logging
.containers
.get(&v1alpha1::Container::Vector),
ResourceRequirementsBuilder::new()
.with_cpu_request("250m")
.with_cpu_limit("500m")
@@ -1092,7 +1094,7 @@ fn build_server_rolegroup_daemonset(
}

pub fn error_policy(
_obj: Arc<DeserializeGuard<OpaCluster>>,
_obj: Arc<DeserializeGuard<v1alpha1::OpaCluster>>,
error: &Error,
_ctx: Arc<Ctx>,
) -> Action {
@@ -1104,12 +1106,15 @@ pub fn error_policy(
}
}

fn build_config_file(merged_config: &OpaConfig) -> String {
fn build_config_file(merged_config: &v1alpha1::OpaConfig) -> String {
let mut decision_logging_enabled = DEFAULT_DECISION_LOGGING_ENABLED;

if let Some(ContainerLogConfig {
choice: Some(ContainerLogConfigChoice::Automatic(log_config)),
}) = merged_config.logging.containers.get(&Container::Opa)
}) = merged_config
.logging
.containers
.get(&v1alpha1::Container::Opa)
{
if let Some(config) = log_config.loggers.get("decision") {
decision_logging_enabled = config.level != LogLevel::NONE;
@@ -1129,15 +1134,18 @@ fn build_config_file(merged_config: &OpaConfig) -> String {
serde_json::to_string_pretty(&json!(config)).unwrap()
}

fn build_opa_start_command(merged_config: &OpaConfig, container_name: &str) -> String {
fn build_opa_start_command(merged_config: &v1alpha1::OpaConfig, container_name: &str) -> String {
let mut file_log_level = DEFAULT_FILE_LOG_LEVEL;
let mut console_log_level = DEFAULT_CONSOLE_LOG_LEVEL;
let mut server_log_level = DEFAULT_SERVER_LOG_LEVEL;
let mut decision_log_level = DEFAULT_DECISION_LOG_LEVEL;

if let Some(ContainerLogConfig {
choice: Some(ContainerLogConfigChoice::Automatic(log_config)),
}) = merged_config.logging.containers.get(&Container::Opa)
}) = merged_config
.logging
.containers
.get(&v1alpha1::Container::Opa)
{
if let Some(AppenderConfig {
level: Some(log_level),
@@ -1198,7 +1206,10 @@ fn build_opa_start_command(merged_config: &OpaConfig, container_name: &str) -> S
}
}

fn build_bundle_builder_start_command(merged_config: &OpaConfig, container_name: &str) -> String {
fn build_bundle_builder_start_command(
merged_config: &v1alpha1::OpaConfig,
container_name: &str,
) -> String {
let mut console_logging_off = false;

// We need to check if the console logging is deactivated (NONE)
@@ -1208,7 +1219,7 @@ fn build_bundle_builder_start_command(merged_config: &OpaConfig, container_name:
}) = merged_config
.logging
.containers
.get(&Container::BundleBuilder)
.get(&v1alpha1::Container::BundleBuilder)
{
if let Some(AppenderConfig {
level: Some(log_level),
@@ -1233,13 +1244,13 @@ fn build_bundle_builder_start_command(merged_config: &OpaConfig, container_name:
}
}

fn bundle_builder_log_level(merged_config: &OpaConfig) -> BundleBuilderLogLevel {
fn bundle_builder_log_level(merged_config: &v1alpha1::OpaConfig) -> BundleBuilderLogLevel {
if let Some(ContainerLogConfig {
choice: Some(ContainerLogConfigChoice::Automatic(log_config)),
}) = merged_config
.logging
.containers
.get(&Container::BundleBuilder)
.get(&v1alpha1::Container::BundleBuilder)
{
if let Some(logger) = log_config
.loggers
@@ -1252,11 +1263,17 @@ fn bundle_builder_log_level(merged_config: &OpaConfig) -> BundleBuilderLogLevel
BundleBuilderLogLevel::Info
}

fn build_prepare_start_command(merged_config: &OpaConfig, container_name: &str) -> Vec<String> {
fn build_prepare_start_command(
merged_config: &v1alpha1::OpaConfig,
container_name: &str,
) -> Vec<String> {
let mut prepare_container_args = vec![];
if let Some(ContainerLogConfig {
choice: Some(ContainerLogConfigChoice::Automatic(log_config)),
}) = merged_config.logging.containers.get(&Container::Prepare)
}) = merged_config
.logging
.containers
.get(&v1alpha1::Container::Prepare)
{
prepare_container_args.push(product_logging::framework::capture_shell_output(
STACKABLE_LOG_DIR,
10 changes: 5 additions & 5 deletions rust/operator-binary/src/discovery.rs
@@ -1,7 +1,7 @@
use crate::controller::{build_recommended_labels, APP_PORT};

use snafu::{OptionExt, ResultExt, Snafu};
use stackable_opa_operator::crd::{OpaCluster, OpaRole};
use stackable_opa_operator::crd::v1alpha1;
use stackable_operator::{
builder::{configmap::ConfigMapBuilder, meta::ObjectMetaBuilder},
commons::product_image_selection::ResolvedProductImage,
@@ -15,7 +15,7 @@ pub enum Error {
#[snafu(display("object {} is missing metadata to build owner reference", opa))]
ObjectMissingMetadataForOwnerRef {
source: stackable_operator::builder::meta::Error,
opa: ObjectRef<OpaCluster>,
opa: ObjectRef<v1alpha1::OpaCluster>,
},

#[snafu(display("object has no name associated"))]
@@ -38,7 +38,7 @@ pub enum Error {
/// Builds discovery [`ConfigMap`]s for connecting to a [`OpaCluster`] for all expected scenarios
pub fn build_discovery_configmaps(
owner: &impl Resource<DynamicType = ()>,
opa: &OpaCluster,
opa: &v1alpha1::OpaCluster,
resolved_product_image: &ResolvedProductImage,
svc: &Service,
cluster_info: &KubernetesClusterInfo,
@@ -58,7 +58,7 @@ pub fn build_discovery_configmaps(
fn build_discovery_configmap(
name: &str,
owner: &impl Resource<DynamicType = ()>,
opa: &OpaCluster,
opa: &v1alpha1::OpaCluster,
resolved_product_image: &ResolvedProductImage,
svc: &Service,
cluster_info: &KubernetesClusterInfo,
@@ -85,7 +85,7 @@ fn build_discovery_configmap(
.with_recommended_labels(build_recommended_labels(
opa,
&resolved_product_image.app_version_label,
&OpaRole::Server.to_string(),
&v1alpha1::OpaRole::Server.to_string(),
"discovery",
))
.context(ObjectMetaSnafu)?
10 changes: 6 additions & 4 deletions rust/operator-binary/src/main.rs
@@ -3,7 +3,7 @@ use std::sync::Arc;
use clap::{crate_description, crate_version, Parser};
use futures::StreamExt;
use product_config::ProductConfigManager;
use stackable_opa_operator::crd::{OpaCluster, APP_NAME, OPERATOR_NAME};
use stackable_opa_operator::crd::{v1alpha1, OpaCluster, APP_NAME, OPERATOR_NAME};
use stackable_operator::{
cli::{Command, ProductOperatorRun},
client::{self, Client},
@@ -18,7 +18,7 @@ use stackable_operator::{
},
logging::controller::report_controller_reconciled,
namespace::WatchNamespace,
CustomResourceExt,
shared::yaml::SerializeOptions,
YamlSchema,
};

use crate::controller::OPA_CONTROLLER_NAME;
@@ -54,7 +55,8 @@ async fn main() -> anyhow::Result<()> {
let opts = Opts::parse();
match opts.cmd {
Command::Crd => {
OpaCluster::print_yaml_schema(built_info::PKG_VERSION)?;
OpaCluster::merged_crd(OpaCluster::V1Alpha1)?
.print_yaml_schema(built_info::PKG_VERSION, SerializeOptions::default())?;
}
Command::Run(OpaRun {
operator_image,
@@ -112,7 +114,7 @@ async fn create_controller(
opa_bundle_builder_image: String,
user_info_fetcher_image: String,
) {
let opa_api: Api<DeserializeGuard<OpaCluster>> = watch_namespace.get_api(&client);
let opa_api: Api<DeserializeGuard<v1alpha1::OpaCluster>> = watch_namespace.get_api(&client);
let daemonsets_api: Api<DeserializeGuard<DaemonSet>> = watch_namespace.get_api(&client);
let configmaps_api: Api<DeserializeGuard<ConfigMap>> = watch_namespace.get_api(&client);
let services_api: Api<DeserializeGuard<Service>> = watch_namespace.get_api(&client);
4 changes: 2 additions & 2 deletions rust/operator-binary/src/operations/graceful_shutdown.rs
@@ -1,5 +1,5 @@
use snafu::{ResultExt, Snafu};
use stackable_opa_operator::crd::{OpaConfig, SERVER_GRACEFUL_SHUTDOWN_SAFETY_OVERHEAD};
use stackable_opa_operator::crd::{v1alpha1, SERVER_GRACEFUL_SHUTDOWN_SAFETY_OVERHEAD};
use stackable_operator::builder::pod::PodBuilder;

#[derive(Debug, Snafu)]
@@ -11,7 +11,7 @@ pub enum Error {
}

pub fn add_graceful_shutdown_config(
merged_config: &OpaConfig,
merged_config: &v1alpha1::OpaConfig,
pod_builder: &mut PodBuilder,
) -> Result<(), Error> {
// This must be always set by the merge mechanism, as we provide a default value,
