Commit 1ebee71 (parent 94e9faf)

ocsinit: remove stale code from ocsinit

Signed-off-by: Rewant Soni <[email protected]>

File tree: 4 files changed, +83 -212 lines

controllers/ocsinitialization/ocsinitialization_controller.go

Lines changed: 83 additions & 197 deletions
@@ -20,7 +20,6 @@ import (
     opv1a1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
     promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
     corev1 "k8s.io/api/core/v1"
-    storagev1 "k8s.io/api/storage/v1"
     extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
     "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -66,8 +65,7 @@ func InitNamespacedName() types.NamespacedName {
 // nolint:revive
 type OCSInitializationReconciler struct {
     client.Client
-    ctx      context.Context
-    clusters *util.Clusters
+    ctx context.Context

     Log    logr.Logger
     Scheme *runtime.Scheme

@@ -79,6 +77,85 @@ type OCSInitializationReconciler struct {
     crdsBeingWatched sync.Map
 }

+// SetupWithManager sets up a controller with a manager
+func (r *OCSInitializationReconciler) SetupWithManager(mgr ctrl.Manager) error {
+    operatorNamespace = r.OperatorNamespace
+    prometheusPredicate := predicate.NewPredicateFuncs(
+        func(client client.Object) bool {
+            return strings.HasPrefix(client.GetName(), PrometheusOperatorCSVNamePrefix)
+        },
+    )
+
+    enqueueOCSInit := handler.EnqueueRequestsFromMapFunc(
+        func(context context.Context, obj client.Object) []reconcile.Request {
+            return []reconcile.Request{{
+                NamespacedName: InitNamespacedName(),
+            }}
+        },
+    )
+
+    controller, err := ctrl.NewControllerManagedBy(mgr).
+        For(&ocsv1.OCSInitialization{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})).
+        Owns(&corev1.Service{}).
+        Owns(&corev1.Secret{}).
+        Owns(&promv1.Prometheus{}).
+        Owns(&corev1.ConfigMap{}).
+        Owns(&promv1.Alertmanager{}).
+        Owns(&promv1.ServiceMonitor{}).
+        // Watcher for storagecluster required to update
+        // ocs-operator-config configmap if storagecluster is created or deleted
+        Watches(
+            &ocsv1.StorageCluster{},
+            enqueueOCSInit,
+            builder.WithPredicates(util.EventTypePredicate(true, false, true, false)),
+        ).
+        // Watcher for rook-ceph-operator-config cm
+        Watches(
+            &corev1.ConfigMap{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      util.RookCephOperatorConfigName,
+                    Namespace: r.OperatorNamespace,
+                },
+            },
+            enqueueOCSInit,
+        ).
+        // Watcher for ocs-operator-config cm
+        Watches(
+            &corev1.ConfigMap{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      util.OcsOperatorConfigName,
+                    Namespace: r.OperatorNamespace,
+                },
+            },
+            enqueueOCSInit,
+        ).
+        // Watcher for prometheus operator csv
+        Watches(
+            &opv1a1.ClusterServiceVersion{},
+            enqueueOCSInit,
+            builder.WithPredicates(prometheusPredicate),
+        ).
+        Watches(
+            &extv1.CustomResourceDefinition{},
+            enqueueOCSInit,
+            builder.WithPredicates(
+                predicate.NewPredicateFuncs(func(obj client.Object) bool {
+                    _, ok := r.crdsBeingWatched.Load(obj.GetName())
+                    return ok
+                }),
+                util.EventTypePredicate(true, false, false, false),
+            ),
+            builder.OnlyMetadata,
+        ).
+        Build(r)
+
+    r.controller = controller
+    r.cache = mgr.GetCache()
+    r.crdsBeingWatched.Store(ClusterClaimCrdName, false)
+
+    return err
+}
+
 // +kubebuilder:rbac:groups=ocs.openshift.io,resources=*,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=security.openshift.io,resources=securitycontextconstraints,verbs=get;create;update
 // +kubebuilder:rbac:groups=security.openshift.io,resourceNames=privileged,resources=securitycontextconstraints,verbs=get;create;update
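
Note on the hunk above: the StorageCluster watch is now gated by util.EventTypePredicate(true, false, true, false), so OCSInitialization is re-reconciled only when a StorageCluster is created or deleted rather than on every spec change. The helper lives in the operator's controllers/util package; its exact implementation is not part of this diff, but a minimal sketch built on controller-runtime's predicate.Funcs (argument order assumed to be create, update, delete, generic) could look like this:

package util

import (
    "sigs.k8s.io/controller-runtime/pkg/event"
    "sigs.k8s.io/controller-runtime/pkg/predicate"
)

// Sketch only: each boolean decides whether the corresponding event type is
// let through, so EventTypePredicate(true, false, true, false) passes create
// and delete events and drops update and generic events.
func EventTypePredicate(create, update, del, generic bool) predicate.Funcs {
    return predicate.Funcs{
        CreateFunc:  func(event.CreateEvent) bool { return create },
        UpdateFunc:  func(event.UpdateEvent) bool { return update },
        DeleteFunc:  func(event.DeleteEvent) bool { return del },
        GenericFunc: func(event.GenericEvent) bool { return generic },
    }
}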
@@ -164,12 +241,6 @@ func (r *OCSInitializationReconciler) Reconcile(ctx context.Context, request rec
         }
     }

-    r.clusters, err = util.GetClusters(ctx, r.Client)
-    if err != nil {
-        r.Log.Error(err, "Failed to get clusters")
-        return reconcile.Result{}, err
-    }
-
     err = r.ensureSCCs(instance)
     if err != nil {
         reason := ocsv1.ReconcileFailed

@@ -313,102 +384,6 @@ func (r *OCSInitializationReconciler) reconcileDynamicWatches() error {
     return nil
 }

-// SetupWithManager sets up a controller with a manager
-func (r *OCSInitializationReconciler) SetupWithManager(mgr ctrl.Manager) error {
-    operatorNamespace = r.OperatorNamespace
-    prometheusPredicate := predicate.NewPredicateFuncs(
-        func(client client.Object) bool {
-            return strings.HasPrefix(client.GetName(), PrometheusOperatorCSVNamePrefix)
-        },
-    )
-
-    enqueueOCSInit := handler.EnqueueRequestsFromMapFunc(
-        func(context context.Context, obj client.Object) []reconcile.Request {
-            return []reconcile.Request{{
-                NamespacedName: InitNamespacedName(),
-            }}
-        },
-    )
-
-    controller, err := ctrl.NewControllerManagedBy(mgr).
-        For(&ocsv1.OCSInitialization{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})).
-        Owns(&corev1.Service{}).
-        Owns(&corev1.Secret{}).
-        Owns(&promv1.Prometheus{}).
-        Owns(&corev1.ConfigMap{}).
-        Owns(&promv1.Alertmanager{}).
-        Owns(&promv1.ServiceMonitor{}).
-        // Watcher for storagecluster required to update
-        // ocs-operator-config configmap if storagecluster spec changes
-        Watches(
-            &ocsv1.StorageCluster{},
-            enqueueOCSInit,
-            builder.WithPredicates(predicate.GenerationChangedPredicate{}),
-        ).
-        // Watcher for storageClass required to update values related to replica-1
-        // in ocs-operator-config configmap, if storageClass changes
-        Watches(
-            &storagev1.StorageClass{},
-            handler.EnqueueRequestsFromMapFunc(
-                func(context context.Context, obj client.Object) []reconcile.Request {
-                    // Only reconcile if the storageClass has topologyConstrainedPools set
-                    sc := obj.(*storagev1.StorageClass)
-                    if sc.Parameters["topologyConstrainedPools"] != "" {
-                        return []reconcile.Request{{
-                            NamespacedName: InitNamespacedName(),
-                        }}
-                    }
-                    return []reconcile.Request{}
-                },
-            ),
-        ).
-        // Watcher for rook-ceph-operator-config cm
-        Watches(
-            &corev1.ConfigMap{
-                ObjectMeta: metav1.ObjectMeta{
-                    Name:      util.RookCephOperatorConfigName,
-                    Namespace: r.OperatorNamespace,
-                },
-            },
-            enqueueOCSInit,
-        ).
-        // Watcher for ocs-operator-config cm
-        Watches(
-            &corev1.ConfigMap{
-                ObjectMeta: metav1.ObjectMeta{
-                    Name:      util.OcsOperatorConfigName,
-                    Namespace: r.OperatorNamespace,
-                },
-            },
-            enqueueOCSInit,
-        ).
-        // Watcher for prometheus operator csv
-        Watches(
-            &opv1a1.ClusterServiceVersion{},
-            enqueueOCSInit,
-            builder.WithPredicates(prometheusPredicate),
-        ).
-        Watches(
-            &extv1.CustomResourceDefinition{},
-            enqueueOCSInit,
-            builder.WithPredicates(
-                predicate.NewPredicateFuncs(func(obj client.Object) bool {
-                    _, ok := r.crdsBeingWatched.Load(obj.GetName())
-                    return ok
-                }),
-                util.EventTypePredicate(true, false, false, false),
-            ),
-            builder.OnlyMetadata,
-        ).
-        Build(r)
-
-    r.controller = controller
-    r.cache = mgr.GetCache()
-    r.crdsBeingWatched.Store(ClusterClaimCrdName, false)
-
-    return err
-}
-
 func (r *OCSInitializationReconciler) ensureClusterClaimExists() error {
     operatorNamespace, err := util.GetOperatorNamespace()
     if err != nil {
@@ -467,19 +442,13 @@ func (r *OCSInitializationReconciler) ensureRookCephOperatorConfigExists(initial
 // When any value in the configmap is updated, the rook-ceph-operator pod is restarted to pick up the new values.
 func (r *OCSInitializationReconciler) ensureOcsOperatorConfigExists(initialData *ocsv1.OCSInitialization) error {

-    enableCephfsVal, err := r.getEnableCephfsKeyValue()
-    if err != nil {
-        r.Log.Error(err, "Failed to get enableCephfsKeyValue")
+    storageClusterList := &ocsv1.StorageClusterList{}
+    if err := r.List(r.ctx, storageClusterList); err != nil {
         return err
     }

     ocsOperatorConfigData := map[string]string{
-        util.ClusterNameKey:              util.GetClusterID(r.ctx, r.Client, &r.Log),
-        util.RookCurrentNamespaceOnlyKey: strconv.FormatBool(!(len(r.clusters.GetStorageClusters()) > 1)),
-        util.EnableTopologyKey:           r.getEnableTopologyKeyValue(),
-        util.TopologyDomainLabelsKey:     r.getTopologyDomainLabelsKeyValue(),
-        util.EnableNFSKey:                r.getEnableNFSKeyValue(),
-        util.EnableCephfsKey:             enableCephfsVal,
+        util.RookCurrentNamespaceOnlyKey: strconv.FormatBool(!(len(storageClusterList.Items) > 1)),
         util.DisableCSIDriverKey:         strconv.FormatBool(true),
     }
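With the per-cluster helpers gone, the only computed key left in this hunk is ROOK_CURRENT_NAMESPACE_ONLY, now derived from a direct List of StorageClusters rather than the removed r.clusters cache. A minimal, self-contained illustration of that boolean (function name is hypothetical, not part of the commit):

package main

import (
    "fmt"
    "strconv"
)

// Illustration only: mirrors the expression above. The flag stays "true"
// while at most one StorageCluster exists and flips to "false" once a
// second StorageCluster is listed.
func rookCurrentNamespaceOnly(numStorageClusters int) string {
    return strconv.FormatBool(!(numStorageClusters > 1))
}

func main() {
    fmt.Println(rookCurrentNamespaceOnly(0)) // true
    fmt.Println(rookCurrentNamespaceOnly(1)) // true
    fmt.Println(rookCurrentNamespaceOnly(2)) // false
}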

@@ -518,89 +487,6 @@ func (r *OCSInitializationReconciler) ensureOcsOperatorConfigExists(initialData
     return nil
 }

-func (r *OCSInitializationReconciler) getEnableTopologyKeyValue() string {
-
-    for _, sc := range r.clusters.GetStorageClusters() {
-        if !sc.Spec.ExternalStorage.Enable && sc.Spec.ManagedResources.CephNonResilientPools.Enable {
-            // In internal mode return true even if one of the storageCluster has enabled it via the CR
-            return "true"
-        } else if sc.Spec.ExternalStorage.Enable {
-            // In external mode, check if the non-resilient storageClass exists
-            scName := util.GenerateNameForNonResilientCephBlockPoolStorageClass(&sc)
-            storageClass := util.GetStorageClassWithName(r.ctx, r.Client, scName)
-            if storageClass != nil {
-                return "true"
-            }
-        }
-    }
-
-    return "false"
-}
-
-// In case of multiple storageClusters when replica-1 is enabled for both an internal and an external cluster, different failure domain keys can lead to complications.
-// To prevent this, when gathering information for the external cluster, ensure that the failure domain is specified to match that of the internal cluster (sc.Status.FailureDomain).
-func (r *OCSInitializationReconciler) getTopologyDomainLabelsKeyValue() string {
-
-    for _, sc := range r.clusters.GetStorageClusters() {
-        if !sc.Spec.ExternalStorage.Enable && sc.Spec.ManagedResources.CephNonResilientPools.Enable {
-            // In internal mode return the failure domain key directly from the storageCluster
-            return sc.Status.FailureDomainKey
-        } else if sc.Spec.ExternalStorage.Enable {
-            // In external mode, check if the non-resilient storageClass exists
-            // determine the failure domain key from the storageClass parameter
-            scName := util.GenerateNameForNonResilientCephBlockPoolStorageClass(&sc)
-            storageClass := util.GetStorageClassWithName(r.ctx, r.Client, scName)
-            if storageClass != nil {
-                return getFailureDomainKeyFromStorageClassParameter(storageClass)
-            }
-        }
-    }
-
-    return ""
-}
-
-func (r *OCSInitializationReconciler) getEnableNFSKeyValue() string {
-
-    // return true even if one of the storagecluster is using NFS
-    for _, sc := range r.clusters.GetStorageClusters() {
-        if sc.Spec.NFS != nil && sc.Spec.NFS.Enable {
-            return "true"
-        }
-    }
-
-    return "false"
-}
-
-func (r *OCSInitializationReconciler) getEnableCephfsKeyValue() (string, error) {
-
-    // list all storage classes and check if any of them is using cephfs
-    storageClasses := &storagev1.StorageClassList{}
-    if err := r.Client.List(r.ctx, storageClasses); err != nil {
-        r.Log.Error(err, "Failed to list storage classes")
-        return "", err
-    }
-
-    for _, sc := range storageClasses.Items {
-        if strings.HasSuffix(sc.Provisioner, "cephfs.csi.ceph.com") {
-            return "true", nil
-        }
-    }
-
-    return "false", nil
-}
-
-func getFailureDomainKeyFromStorageClassParameter(sc *storagev1.StorageClass) string {
-    failuredomain := sc.Parameters["topologyFailureDomainLabel"]
-    if failuredomain == "zone" {
-        return "topology.kubernetes.io/zone"
-    } else if failuredomain == "rack" {
-        return "topology.rook.io/rack"
-    } else if failuredomain == "hostname" || failuredomain == "host" {
-        return "kubernetes.io/hostname"
-    }
-    return ""
-}
-
 func (r *OCSInitializationReconciler) reconcileUXBackendSecret(initialData *ocsv1.OCSInitialization) error {

     var err error

controllers/util/k8sutil.go

Lines changed: 0 additions & 5 deletions
@@ -49,13 +49,8 @@ const (
     RookCephOperatorConfigName = "rook-ceph-operator-config"

     // These are the keys in the ocs-operator-config configmap
-    ClusterNameKey              = "CSI_CLUSTER_NAME"
     RookCurrentNamespaceOnlyKey = "ROOK_CURRENT_NAMESPACE_ONLY"
-    EnableTopologyKey           = "CSI_ENABLE_TOPOLOGY"
-    TopologyDomainLabelsKey     = "CSI_TOPOLOGY_DOMAIN_LABELS"
-    EnableNFSKey                = "ROOK_CSI_ENABLE_NFS"
     DisableCSIDriverKey         = "ROOK_CSI_DISABLE_DRIVER"
-    EnableCephfsKey             = "ROOK_CSI_ENABLE_CEPHFS"

     // This is the name for the FieldIndex
     OwnerUIDIndexName = "ownerUID"
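
After this hunk only two ocs-operator-config keys survive: ROOK_CURRENT_NAMESPACE_ONLY and ROOK_CSI_DISABLE_DRIVER. A hypothetical consumer-side read, just to show the resulting shape of the ConfigMap (function and parameter names are illustrative, not from this repository):

package example

import (
    "context"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

// Sketch only: fetch the trimmed-down ocs-operator-config ConfigMap and
// return the two keys that remain after this commit.
func readOcsOperatorConfig(ctx context.Context, c client.Client, ns string) (map[string]string, error) {
    cm := &corev1.ConfigMap{}
    key := types.NamespacedName{Name: "ocs-operator-config", Namespace: ns}
    if err := c.Get(ctx, key, cm); err != nil {
        return nil, err
    }
    return map[string]string{
        "ROOK_CURRENT_NAMESPACE_ONLY": cm.Data["ROOK_CURRENT_NAMESPACE_ONLY"],
        "ROOK_CSI_DISABLE_DRIVER":     cm.Data["ROOK_CSI_DISABLE_DRIVER"],
    }, nil
}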

hack/install-ocs-operator.sh

Lines changed: 0 additions & 5 deletions
@@ -24,12 +24,7 @@ metadata:
   namespace: openshift-storage
 data:
   ROOK_CURRENT_NAMESPACE_ONLY: "true"
-  CSI_CLUSTER_NAME: "test"
-  CSI_ENABLE_TOPOLOGY: "test"
-  CSI_TOPOLOGY_DOMAIN_LABELS: "test"
-  ROOK_CSI_ENABLE_NFS: "false"
   ROOK_CSI_DISABLE_DRIVER: "true"
-  ROOK_CSI_ENABLE_CEPHFS: "false"
 EOF

 patch_ocs_client_operator_config_configmap() {

metrics/vendor/github.com/red-hat-storage/ocs-operator/v4/controllers/util/k8sutil.go

Lines changed: 0 additions & 5 deletions
Diff not rendered by default (generated/vendored file).
