11 changes: 11 additions & 0 deletions config/rbac/role.yaml
@@ -227,6 +227,17 @@ rules:
- list
- update
- watch
- apiGroups:
- monitoring.coreos.com
resources:
- servicemonitors
verbs:
- create
- get
- list
- patch
- update
- watch
- apiGroups:
- networking.istio.io
resources:
41 changes: 41 additions & 0 deletions controllers/argorollouts_controller.go
@@ -20,12 +20,15 @@ import (
"context"

rolloutsmanagerv1alpha1 "github.com/argoproj-labs/argo-rollouts-manager/api/v1alpha1"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/discovery"
"k8s.io/client-go/rest"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
logr "sigs.k8s.io/controller-runtime/pkg/log"
@@ -48,6 +51,10 @@ type RolloutManagerReconciler struct {

var log = logr.Log.WithName("rollouts-controller")

const (
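// serviceMonitorsCRDName is the name of the CustomResourceDefinition that the Prometheus operator installs for ServiceMonitor resources.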
serviceMonitorsCRDName = "servicemonitors.monitoring.coreos.com"
)

//+kubebuilder:rbac:groups=argoproj.io,resources=rolloutmanagers,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=argoproj.io,resources=rolloutmanagers/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=argoproj.io,resources=rolloutmanagers/finalizers,verbs=update
@@ -77,6 +84,7 @@ var log = logr.Log.WithName("rollouts-controller")
//+kubebuilder:rbac:groups="x.getambassador.io",resources=ambassadormappings;mappings,verbs=create;watch;get;update;list;delete
//+kubebuilder:rbac:groups="apisix.apache.org",resources=apisixroutes,verbs=watch;get;update
//+kubebuilder:rbac:groups="route.openshift.io",resources=routes,verbs=create;watch;get;update;patch;list
//+kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors,verbs=create;watch;get;update;patch;list
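// The monitoring.coreos.com rule added to config/rbac/role.yaml above is generated from this marker (typically via `make manifests` in kubebuilder-scaffolded projects).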

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
@@ -161,5 +169,38 @@ func (r *RolloutManagerReconciler) SetupWithManager(mgr ctrl.Manager) error {
// Watch for changes to ClusterRoleBinding sub-resources owned by RolloutManager.
bld.Owns(&rbacv1.ClusterRoleBinding{})

if crdExists, err := r.doesCRDExist(mgr.GetConfig(), serviceMonitorsCRDName); err != nil {
return err
} else if crdExists {
// We only attempt to own ServiceMonitor if the CRD exists on the cluster at startup.
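// If the CRD is installed after the operator starts, a restart is required before ServiceMonitors are watched.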
bld.Owns(&monitoringv1.ServiceMonitor{})
}

return bld.Complete(r)
}

// doesCRDExist checks if a CRD is present in the cluster, by using the discovery client.
//
// NOTE: this function should only be called from SetupWithManager; elsewhere there are more efficient ways to check whether a CRD exists.
func (r *RolloutManagerReconciler) doesCRDExist(cfg *rest.Config, crdName string) (bool, error) {

// Ideally we would use client.Client to retrieve the CRD here, but since the manager has not yet started, we don't have access to its client. We would need to convert the rest.Config into a client.Client ourselves, so it's easier to use the discovery client.

discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg)
if err != nil {
return false, err
}
apiResources, err := discoveryClient.ServerResourcesForGroupVersion("monitoring.coreos.com/v1")
if err != nil {
if apierrors.IsNotFound(err) {
return false, nil
}
return false, err
}
// crdName has the form "<plural>.<group>", but discovery returns only the plural
// resource name (e.g. "servicemonitors"), so compare against the plural portion.
resourceName := strings.SplitN(crdName, ".", 2)[0]
for _, resource := range apiResources.APIResources {
if resource.Name == resourceName {
return true, nil
}
}
return false, nil
}
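For comparison, the same check could be done by fetching the CRD directly through the apiextensions clientset rather than listing discovery resources. A minimal sketch, not part of this PR (the helper name crdExists is hypothetical):

import (
	"context"

	apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

// crdExists reports whether the named CRD is installed, using a direct GET
// against the apiextensions API instead of the discovery client.
func crdExists(ctx context.Context, cfg *rest.Config, crdName string) (bool, error) {
	cs, err := apiextensionsclient.NewForConfig(cfg)
	if err != nil {
		return false, err
	}
	_, err = cs.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, crdName, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}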
8 changes: 7 additions & 1 deletion controllers/default.go
@@ -4,17 +4,23 @@ const (
// ArgoRolloutsImageEnvName is an environment variable that can be used to deploy a
// custom image of the Rollouts controller.
ArgoRolloutsImageEnvName = "ARGO_ROLLOUTS_IMAGE"

// DefaultArgoRolloutsMetricsServiceName is the default name for rollouts metrics Service.
DefaultArgoRolloutsMetricsServiceName = "argo-rollouts-metrics"

// DefaultArgoRolloutsImage is the default image for the Rollouts controller.
DefaultArgoRolloutsImage = "quay.io/argoproj/argo-rollouts"

// DefaultArgoRolloutsVersion is the default version for the Rollouts controller.
DefaultArgoRolloutsVersion = "v1.6.6"

// DefaultArgoRolloutsResourceName is the default name for Rollouts controller resources such as
// deployment, service, role, rolebinding and serviceaccount.
DefaultArgoRolloutsResourceName = "argo-rollouts"

// DefaultRolloutsNotificationSecretName is the default name for rollout controller secret resource.
DefaultRolloutsNotificationSecretName = "argo-rollouts-notification-secret" // #nosec G101

// DefaultRolloutsSelectorKey is the key used by the Rollouts Service selector.
DefaultRolloutsSelectorKey = "app.kubernetes.io/name"

132 changes: 127 additions & 5 deletions controllers/resources.go
@@ -6,9 +6,11 @@ import (
"reflect"

rolloutsmanagerv1alpha1 "github.com/argoproj-labs/argo-rollouts-manager/api/v1alpha1"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
crdv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
@@ -376,16 +378,77 @@ func (r *RolloutManagerReconciler) reconcileRolloutsMetricsService(ctx context.C
}

log.Info(fmt.Sprintf("Creating Service %s", expectedSvc.Name))
if err := r.Client.Create(ctx, expectedSvc); err != nil {
log.Error(err, "Error creating Service", "Name", expectedSvc.Name)
return err
}
actualSvc = expectedSvc

} else if !reflect.DeepEqual(actualSvc.Spec.Ports, expectedSvc.Spec.Ports) {
log.Info(fmt.Sprintf("Ports of Service %s do not match the expected state, hence updating it", actualSvc.Name))
actualSvc.Spec.Ports = expectedSvc.Spec.Ports
if err := r.Client.Update(ctx, actualSvc); err != nil {
log.Error(err, "Error updating Ports of Service", "Name", actualSvc.Name)
return err
}
}

// Check whether the Prometheus operator is installed by looking up the ServiceMonitor CustomResourceDefinition
smCRD := &crdv1.CustomResourceDefinition{
ObjectMeta: metav1.ObjectMeta{
Name: serviceMonitorsCRDName,
},
}

if err := fetchObject(ctx, r.Client, smCRD.Namespace, smCRD.Name, smCRD); err != nil {
if !apierrors.IsNotFound(err) {
return fmt.Errorf("failed to get the ServiceMonitor CRD %s: %s", smCRD.Name, err)
}
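// The ServiceMonitor CRD is not present, so the Prometheus operator is not installed; skip ServiceMonitor reconciliation.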
return nil
}

// Create or update the ServiceMonitor for Rollouts metrics
existingServiceMonitor := &monitoringv1.ServiceMonitor{}
if err := fetchObject(ctx, r.Client, cr.Namespace, DefaultArgoRolloutsResourceName, existingServiceMonitor); err != nil {
if apierrors.IsNotFound(err) {
if err := r.createServiceMonitorIfAbsent(ctx, cr.Namespace, cr, DefaultArgoRolloutsResourceName, expectedSvc.Name); err != nil {
return err
}
return nil

} else {
log.Error(err, "Error querying for ServiceMonitor", "Namespace", cr.Namespace, "Name", actualSvc.Name)
return err
}

} else {
log.Info("A ServiceMonitor instance already exists",
"Namespace", existingServiceMonitor.Namespace, "Name", existingServiceMonitor.Name)

// Check if existing ServiceMonitor matches expected content
if !serviceMonitorMatches(existingServiceMonitor, actualSvc.Name) {
log.Info("Updating existing ServiceMonitor instance",
"Namespace", existingServiceMonitor.Namespace, "Name", existingServiceMonitor.Name)

// Update ServiceMonitor with expected content
existingServiceMonitor.Spec.Selector.MatchLabels = map[string]string{
"app.kubernetes.io/name": actualSvc.Name,
}
existingServiceMonitor.Spec.Endpoints = []monitoringv1.Endpoint{
{
Port: "metrics",
},
}

if err := r.Client.Update(ctx, existingServiceMonitor); err != nil {
log.Error(err, "Error updating existing ServiceMonitor instance",
"Namespace", existingServiceMonitor.Namespace, "Name", existingServiceMonitor.Name)
return err
}
}
return nil
}

return nil
}

// Reconciles Secrets for Rollouts controller
@@ -934,3 +997,62 @@ func GetAggregateToViewPolicyRules() []rbacv1.PolicyRule {
},
}
}

// createServiceMonitorIfAbsent creates a ServiceMonitor that selects the Rollouts metrics Service; callers are expected to have already verified that none exists.
func (r *RolloutManagerReconciler) createServiceMonitorIfAbsent(ctx context.Context, namespace string, rolloutManager rolloutsmanagerv1alpha1.RolloutManager, name, serviceMonitorLabel string) error {
serviceMonitor := &monitoringv1.ServiceMonitor{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: monitoringv1.ServiceMonitorSpec{
Selector: metav1.LabelSelector{
MatchLabels: map[string]string{
"app.kubernetes.io/name": serviceMonitorLabel,
},
},
Endpoints: []monitoringv1.Endpoint{
{
Port: "metrics",
},
},
},
}
log.Info("Creating a new ServiceMonitor instance",
"Namespace", serviceMonitor.Namespace, "Name", serviceMonitor.Name)

// Set the RolloutManager instance as the owner and controller
if err := controllerutil.SetControllerReference(&rolloutManager, serviceMonitor, r.Scheme); err != nil {
log.Error(err, "Error setting read role owner ref",
"Namespace", serviceMonitor.Namespace, "Name", serviceMonitor.Name, "RolloutManager Name", rolloutManager.Name)
return err
}

err := r.Client.Create(ctx, serviceMonitor)
if err != nil {
log.Error(err, "Error creating a new ServiceMonitor instance",
"Namespace", serviceMonitor.Namespace, "Name", serviceMonitor.Name)
return err
}

return nil
}

func serviceMonitorMatches(sm *monitoringv1.ServiceMonitor, matchLabel string) bool {
// Check if labels match
labels := sm.Spec.Selector.MatchLabels
if val, ok := labels["app.kubernetes.io/name"]; ok {
if val != matchLabel {
return false
}
} else {
return false
}

// Check if endpoints match
if len(sm.Spec.Endpoints) == 0 || sm.Spec.Endpoints[0].Port != "metrics" {
return false
}

return true
}
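As a quick illustration of serviceMonitorMatches (values here are hypothetical), a ServiceMonitor shaped like the one createServiceMonitorIfAbsent creates passes the check for the matching Service name and fails otherwise:

sm := &monitoringv1.ServiceMonitor{
	Spec: monitoringv1.ServiceMonitorSpec{
		Selector: metav1.LabelSelector{
			MatchLabels: map[string]string{"app.kubernetes.io/name": "argo-rollouts-metrics"},
		},
		Endpoints: []monitoringv1.Endpoint{{Port: "metrics"}},
	},
}
serviceMonitorMatches(sm, "argo-rollouts-metrics") // true
serviceMonitorMatches(sm, "other-name")            // false: label value differs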