Skip to content

Commit

Permalink
Merge pull request #2319 from Nikhil-Ladha/upgrade-golangci-lint
Browse files Browse the repository at this point in the history
upgraded golangci-lint to 1.54.2
  • Loading branch information
openshift-merge-bot[bot] authored Dec 14, 2023
2 parents 7b78fb7 + 7d5c9fc commit 73a509b
Show file tree
Hide file tree
Showing 40 changed files with 109 additions and 124 deletions.
7 changes: 2 additions & 5 deletions .github/workflows/ocs-operator-ci.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -37,11 +37,8 @@ jobs:

- uses: golangci/golangci-lint-action@v3
with:
version: v1.51.2

# The weird NO_FUTURE thing is a workaround suggested here:
# https://github.com/golangci/golangci-lint-action/issues/119#issuecomment-981090648
args: "--out-${NO_FUTURE}format=colored-line-number --timeout=6m ./..."
version: v1.54.2
args: "--out-format=colored-line-number --timeout=6m ./..."

go-test:
name: go test
Expand Down
2 changes: 1 addition & 1 deletion controllers/storagecluster/backingstorageclasses.go
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,7 @@ func createOrUpdateBackingStorageclass(r *StorageClusterReconciler, bsc *ocsv1.B
}

// ensureDeleted deletes the backing storageclasses
func (obj *backingStorageClasses) ensureDeleted(r *StorageClusterReconciler, sc *ocsv1.StorageCluster) (reconcile.Result, error) {
func (obj *backingStorageClasses) ensureDeleted(r *StorageClusterReconciler, _ *ocsv1.StorageCluster) (reconcile.Result, error) {
existingBackingStorageClasses := &v1.StorageClassList{}
err := r.Client.List(
r.ctx,
Expand Down
2 changes: 1 addition & 1 deletion controllers/storagecluster/cephblockpools_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ func TestCephBlockPools(t *testing.T) {
t, reconciler, cr, request := initStorageClusterResourceCreateUpdateTestWithPlatform(
t, cp, objects, nil)
if c.createRuntimeObjects {
objects = createUpdateRuntimeObjects(t, cp, reconciler) //nolint:staticcheck //no need to use objects as they update in runtime
objects = createUpdateRuntimeObjects(t, reconciler) //nolint:staticcheck //no need to use objects as they update in runtime
}
assertCephBlockPools(t, reconciler, cr, request, false, false)
assertCephNFSBlockPool(t, reconciler, cr, request)
Expand Down
14 changes: 7 additions & 7 deletions controllers/storagecluster/cephcluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -165,7 +165,7 @@ func (obj *ocsCephCluster) ensureCreated(r *StorageClusterReconciler, sc *ocsv1.
return reconcile.Result{}, err
}
r.Log.Info("Monitoring Information found. Monitoring will be enabled on the external cluster.", "CephCluster", klog.KRef(sc.Namespace, sc.Name))
cephCluster = newExternalCephCluster(sc, r.images.Ceph, monitoringIP, monitoringPort)
cephCluster = newExternalCephCluster(sc, monitoringIP, monitoringPort)
} else {
// Add KMS details to CephCluster spec, only if
// cluster-wide encryption is enabled
Expand All @@ -190,12 +190,12 @@ func (obj *ocsCephCluster) ensureCreated(r *StorageClusterReconciler, sc *ocsv1.
return reconcile.Result{}, err
}
}
cephCluster, err = newCephCluster(sc, r.images.Ceph, r.nodeCount, r.serverVersion, kmsConfigMap, r.Log)
cephCluster, err = newCephCluster(sc, r.images.Ceph, r.serverVersion, kmsConfigMap, r.Log)
if err != nil {
return reconcile.Result{}, err
}
} else {
cephCluster, err = newCephCluster(sc, r.images.Ceph, r.nodeCount, r.serverVersion, nil, r.Log)
cephCluster, err = newCephCluster(sc, r.images.Ceph, r.serverVersion, nil, r.Log)
if err != nil {
return reconcile.Result{}, err
}
Expand Down Expand Up @@ -397,7 +397,7 @@ func getCephClusterMonitoringLabels(sc ocsv1.StorageCluster) map[string]string {
}

// newCephCluster returns a CephCluster object.
func newCephCluster(sc *ocsv1.StorageCluster, cephImage string, nodeCount int, serverVersion *version.Info, kmsConfigMap *corev1.ConfigMap, reqLogger logr.Logger) (*rookCephv1.CephCluster, error) {
func newCephCluster(sc *ocsv1.StorageCluster, cephImage string, serverVersion *version.Info, kmsConfigMap *corev1.ConfigMap, reqLogger logr.Logger) (*rookCephv1.CephCluster, error) {
labels := map[string]string{
"app": sc.Name,
}
Expand Down Expand Up @@ -585,7 +585,7 @@ func getNetworkSpec(sc ocsv1.StorageCluster) rookCephv1.NetworkSpec {
return networkSpec
}

func newExternalCephCluster(sc *ocsv1.StorageCluster, cephImage, monitoringIP, monitoringPort string) *rookCephv1.CephCluster {
func newExternalCephCluster(sc *ocsv1.StorageCluster, monitoringIP, monitoringPort string) *rookCephv1.CephCluster {
labels := map[string]string{
"app": sc.Name,
}
Expand Down Expand Up @@ -1131,7 +1131,7 @@ func createPrometheusRules(r *StorageClusterReconciler, sc *ocsv1.StorageCluster
changePromRuleExpr(prometheusRule, replaceTokens)
}

if err := createOrUpdatePrometheusRule(r, sc, prometheusRule); err != nil {
if err := createOrUpdatePrometheusRule(r, prometheusRule); err != nil {
r.Log.Error(err, "Prometheus rules could not be created.", "CephCluster", klog.KRef(cluster.Namespace, cluster.Name))
return err
}
Expand Down Expand Up @@ -1199,7 +1199,7 @@ func parsePrometheusRule(rules string) (*monitoringv1.PrometheusRule, error) {
}

// createOrUpdatePrometheusRule creates a prometheusRule object or an error
func createOrUpdatePrometheusRule(r *StorageClusterReconciler, sc *ocsv1.StorageCluster, prometheusRule *monitoringv1.PrometheusRule) error {
func createOrUpdatePrometheusRule(r *StorageClusterReconciler, prometheusRule *monitoringv1.PrometheusRule) error {
name := prometheusRule.GetName()
namespace := prometheusRule.GetNamespace()
client, err := getMonitoringClient()
Expand Down
22 changes: 11 additions & 11 deletions controllers/storagecluster/cephcluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ func TestEnsureCephCluster(t *testing.T) {

reconciler := createFakeStorageClusterReconciler(t, networkConfig)

expected, err := newCephCluster(mockStorageCluster.DeepCopy(), "", 3, reconciler.serverVersion, nil, log)
expected, err := newCephCluster(mockStorageCluster.DeepCopy(), "", reconciler.serverVersion, nil, log)
assert.NilError(t, err)
expected.Status.State = c.cephClusterState

Expand Down Expand Up @@ -210,7 +210,7 @@ func TestCephClusterMonTimeout(t *testing.T) {
_, err := obj.ensureCreated(&reconciler, sc)
assert.NilError(t, err)

cc, err := newCephCluster(sc, "", 3, reconciler.serverVersion, nil, log)
cc, err := newCephCluster(sc, "", reconciler.serverVersion, nil, log)
assert.NilError(t, err)
err = reconciler.Client.Get(context.TODO(), mockCephClusterNamespacedName, cc)
assert.NilError(t, err)
Expand Down Expand Up @@ -276,7 +276,7 @@ func TestNewCephClusterMonData(t *testing.T) {
c.sc.Spec.MonDataDirHostPath = c.monDataPath
c.sc.Status.Images.Ceph = &api.ComponentImageStatus{}

actual, err := newCephCluster(c.sc, "", 3, serverVersion, nil, log)
actual, err := newCephCluster(c.sc, "", serverVersion, nil, log)
assert.NilError(t, err)
assert.Equal(t, generateNameForCephCluster(c.sc), actual.Name)
assert.Equal(t, c.sc.Namespace, actual.Namespace)
Expand Down Expand Up @@ -1323,21 +1323,21 @@ func TestLogCollector(t *testing.T) {
sc.Spec.LogCollector = &defaultLogCollector

r := createFakeStorageClusterReconciler(t)
actual, err := newCephCluster(sc, "", 3, r.serverVersion, nil, log)
actual, err := newCephCluster(sc, "", r.serverVersion, nil, log)
assert.NilError(t, err)
assert.DeepEqual(t, actual.Spec.LogCollector, defaultLogCollector)

// when disabled in storageCluster
sc.Spec.LogCollector = &cephv1.LogCollectorSpec{}
actual, err = newCephCluster(sc, "", 3, r.serverVersion, nil, log)
actual, err = newCephCluster(sc, "", r.serverVersion, nil, log)
assert.NilError(t, err)
assert.DeepEqual(t, actual.Spec.LogCollector, defaultLogCollector)

maxLogSize, err = resource.ParseQuantity("6Gi")
assert.NilError(t, err)
sc.Spec.LogCollector.MaxLogSize = &maxLogSize

actual, err = newCephCluster(sc, "", 3, r.serverVersion, nil, log)
actual, err = newCephCluster(sc, "", r.serverVersion, nil, log)
assert.NilError(t, err)
assert.DeepEqual(t, actual.Spec.LogCollector.MaxLogSize, &maxLogSize)
}
Expand Down Expand Up @@ -1466,7 +1466,7 @@ func TestCephClusterNetworkConnectionsSpec(t *testing.T) {
mockStorageCluster.DeepCopyInto(sc)
sc.Spec.Network = testCase.scSpec.Network
sc.Spec.ExternalStorage.Enable = true
cc := newExternalCephCluster(sc, "", "", "")
cc := newExternalCephCluster(sc, "", "")
assert.DeepEqual(t, cc.Spec.Network.Connections, testCase.ccSpec.Network.Connections)
}
// Test for internal mode
Expand All @@ -1478,7 +1478,7 @@ func TestCephClusterNetworkConnectionsSpec(t *testing.T) {
sc.Spec.Network = testCase.scSpec.Network
reconciler := createFakeStorageClusterReconciler(t)
testCase.ccSpec.Network.Connections.RequireMsgr2 = true
cc, _ := newCephCluster(sc, "", 3, reconciler.serverVersion, nil, log)
cc, _ := newCephCluster(sc, "", reconciler.serverVersion, nil, log)
assert.DeepEqual(t, cc.Spec.Network.Connections, testCase.ccSpec.Network.Connections)
}
}
Expand Down Expand Up @@ -1552,7 +1552,7 @@ func TestCephClusterStoreType(t *testing.T) {
r := createFakeStorageClusterReconciler(t)

t.Run("ensure no bluestore optimization", func(t *testing.T) {
actual, err := newCephCluster(sc, "", 3, r.serverVersion, nil, log)
actual, err := newCephCluster(sc, "", r.serverVersion, nil, log)
assert.NilError(t, err)
assert.Equal(t, "", actual.Spec.Storage.Store.Type)
})
Expand All @@ -1562,14 +1562,14 @@ func TestCephClusterStoreType(t *testing.T) {
DisasterRecoveryTargetAnnotation: "true",
}
sc.Annotations = annotations
actual, err := newCephCluster(sc, "", 3, r.serverVersion, nil, log)
actual, err := newCephCluster(sc, "", r.serverVersion, nil, log)
assert.NilError(t, err)
assert.Equal(t, "bluestore-rdr", actual.Spec.Storage.Store.Type)
})

t.Run("ensure no bluestore optimization for external clusters", func(t *testing.T) {
sc.Spec.ExternalStorage.Enable = true
actual, err := newCephCluster(sc, "", 3, r.serverVersion, nil, log)
actual, err := newCephCluster(sc, "", r.serverVersion, nil, log)
assert.NilError(t, err)
assert.Equal(t, "", actual.Spec.Storage.Store.Type)
})
Expand Down
2 changes: 1 addition & 1 deletion controllers/storagecluster/cephconfig.go
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,7 @@ func (obj *ocsCephConfig) ensureCreated(r *StorageClusterReconciler, sc *ocsv1.S
}

// ensureDeleted is dummy func for the ocsCephConfig
func (obj *ocsCephConfig) ensureDeleted(r *StorageClusterReconciler, instance *ocsv1.StorageCluster) (reconcile.Result, error) {
func (obj *ocsCephConfig) ensureDeleted(_ *StorageClusterReconciler, _ *ocsv1.StorageCluster) (reconcile.Result, error) {
return reconcile.Result{}, nil
}

Expand Down
15 changes: 7 additions & 8 deletions controllers/storagecluster/cephfilesystem.go
Original file line number Diff line number Diff line change
Expand Up @@ -80,12 +80,11 @@ func (r *StorageClusterReconciler) newCephFilesystemInstances(initStorageCluster
return nil, updateErr
}
continue
} else {
storageProfile.Status.Phase = ""
if updateErr := r.Client.Status().Update(r.ctx, &storageProfile); updateErr != nil {
r.Log.Error(updateErr, "Could not update StorageProfile.", "StorageProfile", klog.KRef(storageProfile.Namespace, storageProfile.Name))
return nil, updateErr
}
}
storageProfile.Status.Phase = ""
if updateErr := r.Client.Status().Update(r.ctx, &storageProfile); updateErr != nil {
r.Log.Error(updateErr, "Could not update StorageProfile.", "StorageProfile", klog.KRef(storageProfile.Namespace, storageProfile.Name))
return nil, updateErr
}
parameters := spSpec.SharedFilesystemConfiguration.Parameters
ret.Spec.DataPools = append(ret.Spec.DataPools, cephv1.NamedPoolSpec{
Expand Down Expand Up @@ -203,7 +202,7 @@ func (r *StorageClusterReconciler) createDefaultSubvolumeGroup(filesystemName, f
return nil
}

func (r *StorageClusterReconciler) deleteDefaultSubvolumeGroup(filesystemName, filesystemNamespace string, ownerReferences []metav1.OwnerReference) error {
func (r *StorageClusterReconciler) deleteDefaultSubvolumeGroup(filesystemName, filesystemNamespace string) error {
existingsvg := &cephv1.CephFilesystemSubVolumeGroup{}
svgName := generateNameForCephSubvolumeGroup(filesystemName)
err := r.Client.Get(r.ctx, types.NamespacedName{Name: svgName, Namespace: filesystemNamespace}, existingsvg)
Expand Down Expand Up @@ -259,7 +258,7 @@ func (obj *ocsCephFilesystems) ensureDeleted(r *StorageClusterReconciler, sc *oc
// skip for the ocs provider mode
if !sc.Spec.AllowRemoteStorageConsumers {
cephSVGName := generateNameForCephSubvolumeGroup(cephFilesystem.Name)
err = r.deleteDefaultSubvolumeGroup(cephFilesystem.Name, cephFilesystem.Namespace, cephFilesystem.ObjectMeta.OwnerReferences)
err = r.deleteDefaultSubvolumeGroup(cephFilesystem.Name, cephFilesystem.Namespace)
if err != nil {
r.Log.Error(err, "Uninstall: unable to delete subvolumegroup", "subvolumegroup", klog.KRef(cephFilesystem.Namespace, cephSVGName))
return reconcile.Result{}, err
Expand Down
4 changes: 2 additions & 2 deletions controllers/storagecluster/cephfilesystem_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ func TestCephFileSystem(t *testing.T) {
t, reconciler, cr, request := initStorageClusterResourceCreateUpdateTestWithPlatform(
t, cp, objects, nil)
if c.createRuntimeObjects {
objects = createUpdateRuntimeObjects(t, cp, reconciler) //nolint:staticcheck //no need to use objects as they update in runtime
objects = createUpdateRuntimeObjects(t, reconciler) //nolint:staticcheck //no need to use objects as they update in runtime
}
assertCephFileSystem(t, reconciler, cr, request)
}
Expand Down Expand Up @@ -83,7 +83,7 @@ func TestDeleteDefaultSubvolumeGroup(t *testing.T) {
filesystem, err := reconciler.newCephFilesystemInstances(cr)
assert.NoError(t, err)

err = reconciler.deleteDefaultSubvolumeGroup(filesystem[0].Name, filesystem[0].Namespace, filesystem[0].OwnerReferences)
err = reconciler.deleteDefaultSubvolumeGroup(filesystem[0].Name, filesystem[0].Namespace)
assert.NoError(t, err)

svg := &cephv1.CephFilesystemSubVolumeGroup{}
Expand Down
2 changes: 1 addition & 1 deletion controllers/storagecluster/cephnfs_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ func TestCephNFS(t *testing.T) {
t, reconciler, cr, request := initStorageClusterResourceCreateUpdateTestWithPlatform(
t, cp, objects, nil)
if c.createRuntimeObjects {
objects = createUpdateRuntimeObjects(t, cp, reconciler) //nolint:staticcheck //no need to use objects as they update in runtime
objects = createUpdateRuntimeObjects(t, reconciler) //nolint:staticcheck //no need to use objects as they update in runtime
}
assertCephNFS(t, reconciler, cr, request)
assertCephNFSService(t, reconciler, cr, request)
Expand Down
2 changes: 1 addition & 1 deletion controllers/storagecluster/cephobjectstores_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ func TestCephObjectStores(t *testing.T) {
t, reconciler, cr, request := initStorageClusterResourceCreateUpdateTestWithPlatform(
t, cp, objects, nil)
if c.createRuntimeObjects {
objects = createUpdateRuntimeObjects(t, cp, reconciler) //nolint:staticcheck //no need to use objects as they update in runtime
objects = createUpdateRuntimeObjects(t, reconciler) //nolint:staticcheck //no need to use objects as they update in runtime
}
assertCephObjectStores(t, reconciler, cr, request)
}
Expand Down
2 changes: 1 addition & 1 deletion controllers/storagecluster/cephobjectstoreusers_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ func TestCephObjectStoreUsers(t *testing.T) {
t, reconciler, cr, request := initStorageClusterResourceCreateUpdateTestWithPlatform(
t, cp, objects, nil)
if c.createRuntimeObjects {
objects = createUpdateRuntimeObjects(t, cp, reconciler) //nolint:staticcheck //no need to use objects as they update in runtime
objects = createUpdateRuntimeObjects(t, reconciler) //nolint:staticcheck //no need to use objects as they update in runtime
}
assertCephObjectStoreUsers(t, reconciler, cr, request)
}
Expand Down
4 changes: 2 additions & 2 deletions controllers/storagecluster/cephrbdmirrors_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ func TestCephRbdMirror(t *testing.T) {
case "create-ceph-rbd-mirror":
assertCephRbdMirrorCreation(t, reconciler, cr, request)
case "delete-ceph-rbd-mirror":
assertCephRbdMirrorDeletion(t, reconciler, cr, request)
assertCephRbdMirrorDeletion(t, reconciler, request)
}
}
}
Expand All @@ -83,7 +83,7 @@ func assertCephRbdMirrorCreation(t *testing.T, reconciler StorageClusterReconcil
assert.Equal(t, expectedCrm[0].Spec, actualCrm.Spec)
}

func assertCephRbdMirrorDeletion(t *testing.T, reconciler StorageClusterReconciler, cr *api.StorageCluster, request reconcile.Request) {
func assertCephRbdMirrorDeletion(t *testing.T, reconciler StorageClusterReconciler, request reconcile.Request) {
actualCrm := &cephv1.CephRBDMirror{
ObjectMeta: metav1.ObjectMeta{
Name: "ocsinit-cephrbdmirror",
Expand Down
5 changes: 2 additions & 3 deletions controllers/storagecluster/exporter.go
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ func (r *StorageClusterReconciler) enableMetricsExporter(
}

// create/update clusterrole for metrics exporter
if err := updateMetricsExporterClusterRoles(ctx, r, instance); err != nil {
if err := updateMetricsExporterClusterRoles(ctx, r); err != nil {
r.Log.Error(err, "unable to update clusterroles for metrics exporter")
return err
}
Expand Down Expand Up @@ -548,8 +548,7 @@ const metricsExporterClusterRoleJSON = `
]
}`

func updateMetricsExporterClusterRoles(ctx context.Context,
r *StorageClusterReconciler, instance *ocsv1.StorageCluster) error {
func updateMetricsExporterClusterRoles(ctx context.Context, r *StorageClusterReconciler) error {
currentClusterRole := new(rbacv1.ClusterRole)
var expectedClusterRole = new(rbacv1.ClusterRole)
err := json.Unmarshal([]byte(metricsExporterClusterRoleJSON), expectedClusterRole)
Expand Down
6 changes: 3 additions & 3 deletions controllers/storagecluster/external_resources.go
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,7 @@ func (r *StorageClusterReconciler) retrieveSecret(secretName string, instance *o
}

// deleteSecret function delete the secret object with the specified name
func (r *StorageClusterReconciler) deleteSecret(secretName string, instance *ocsv1.StorageCluster) error {
func (r *StorageClusterReconciler) deleteSecret(instance *ocsv1.StorageCluster) error {
found, err := r.retrieveSecret(externalClusterDetailsSecret, instance)
if errors.IsNotFound(err) {
r.Log.Info("External rhcs mode secret already deleted.")
Expand Down Expand Up @@ -275,7 +275,7 @@ func (obj *ocsExternalResources) ensureCreated(r *StorageClusterReconciler, inst
}

// ensureDeleted is dummy func for the ocsExternalResources
func (obj *ocsExternalResources) ensureDeleted(r *StorageClusterReconciler, instance *ocsv1.StorageCluster) (reconcile.Result, error) {
func (obj *ocsExternalResources) ensureDeleted(_ *StorageClusterReconciler, _ *ocsv1.StorageCluster) (reconcile.Result, error) {
return reconcile.Result{}, nil
}

Expand Down Expand Up @@ -511,7 +511,7 @@ func (r *StorageClusterReconciler) deleteExternalSecret(sc *ocsv1.StorageCluster
if !sc.Spec.ExternalStorage.Enable {
return nil
}
err = r.deleteSecret(externalClusterDetailsSecret, sc)
err = r.deleteSecret(sc)
if err != nil {
r.Log.Error(err, "Error while deleting external rhcs mode secret.")
}
Expand Down
6 changes: 0 additions & 6 deletions controllers/storagecluster/generate.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,9 @@ package storagecluster

import (
"fmt"
"os"
"strings"

ocsv1 "github.com/red-hat-storage/ocs-operator/api/v4/v1"
"github.com/red-hat-storage/ocs-operator/v4/controllers/util"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
)

Expand Down Expand Up @@ -111,10 +109,6 @@ func generateNameForCephNetworkFilesystemSC(initData *ocsv1.StorageCluster) stri
return fmt.Sprintf("%s-ceph-nfs", initData.Name)
}

func generateNameForNFSCSIProvisioner(initData *ocsv1.StorageCluster) string {
return fmt.Sprintf("%s.nfs.csi.ceph.com", os.Getenv(util.OperatorNamespaceEnvVar))
}

// generateNameForSnapshotClass function generates 'SnapshotClass' name.
// 'snapshotType' can be: 'rbdSnapshotter' or 'cephfsSnapshotter' or 'nfsSnapshotter'
func generateNameForSnapshotClass(initData *ocsv1.StorageCluster, snapshotType SnapshotterType) string {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ func createStorageCluster(scName, failureDomainName string,
return cr
}

func createUpdateRuntimeObjects(t *testing.T, cp *Platform, r StorageClusterReconciler) []client.Object {
func createUpdateRuntimeObjects(t *testing.T, r StorageClusterReconciler) []client.Object {
csfs := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
Name: "ocsinit-cephfs",
Expand Down
2 changes: 1 addition & 1 deletion controllers/storagecluster/job_templates.go
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ func (obj *ocsJobTemplates) ensureCreated(r *StorageClusterReconciler, sc *ocsv1
}

// ensureDeleted is dummy func for the ocsJobTemplates
func (obj *ocsJobTemplates) ensureDeleted(r *StorageClusterReconciler, sc *ocsv1.StorageCluster) (reconcile.Result, error) {
func (obj *ocsJobTemplates) ensureDeleted(_ *StorageClusterReconciler, _ *ocsv1.StorageCluster) (reconcile.Result, error) {
return reconcile.Result{}, nil
}

Expand Down
2 changes: 1 addition & 1 deletion controllers/storagecluster/operator_conditions_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,6 @@ func (stubCondition) Get(_ context.Context) (*metav1.Condition, error) {

}

func (stubCondition) Set(_ context.Context, status metav1.ConditionStatus, option ...conditions.Option) error {
func (stubCondition) Set(_ context.Context, _ metav1.ConditionStatus, _ ...conditions.Option) error {
return nil
}
Loading

0 comments on commit 73a509b

Please sign in to comment.