Skip to content
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 11 additions & 0 deletions config/rbac/role.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -227,6 +227,17 @@ rules:
- list
- update
- watch
- apiGroups:
- monitoring.coreos.com
resources:
- servicemonitors
verbs:
- create
- get
- list
- patch
- update
- watch
- apiGroups:
- networking.istio.io
resources:
Expand Down
4 changes: 4 additions & 0 deletions controllers/argorollouts_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ import (
"context"

rolloutsmanagerv1alpha1 "github.com/argoproj-labs/argo-rollouts-manager/api/v1alpha1"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
Expand Down Expand Up @@ -77,6 +78,7 @@ var log = logr.Log.WithName("rollouts-controller")
//+kubebuilder:rbac:groups="x.getambassador.io",resources=ambassadormappings;mappings,verbs=create;watch;get;update;list;delete
//+kubebuilder:rbac:groups="apisix.apache.org",resources=apisixroutes,verbs=watch;get;update
//+kubebuilder:rbac:groups="route.openshift.io",resources=routes,verbs=create;watch;get;update;patch;list
//+kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors,verbs=create;watch;get;update;patch;list

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
Expand Down Expand Up @@ -160,6 +162,8 @@ func (r *RolloutManagerReconciler) SetupWithManager(mgr ctrl.Manager) error {

// Watch for changes to ClusterRoleBinding sub-resources owned by RolloutManager.
bld.Owns(&rbacv1.ClusterRoleBinding{})
// Watch for changes to ServiceMonitor sub-resources owned by RolloutManager.
bld.Owns(&monitoringv1.ServiceMonitor{})

return bld.Complete(r)
}
126 changes: 124 additions & 2 deletions controllers/resources.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,11 @@ import (
"reflect"

rolloutsmanagerv1alpha1 "github.com/argoproj-labs/argo-rollouts-manager/api/v1alpha1"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
crdv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
Expand Down Expand Up @@ -376,15 +378,76 @@ func (r *RolloutManagerReconciler) reconcileRolloutsMetricsService(ctx context.C
}

log.Info(fmt.Sprintf("Creating Service %s", expectedSvc.Name))
return r.Client.Create(ctx, expectedSvc)
if err := r.Client.Create(ctx, expectedSvc); err != nil {
log.Error(err, "Error creating Service", "Name", expectedSvc.Name)
return err
}
return nil
Comment thread
jgwest marked this conversation as resolved.
Outdated
}

if !reflect.DeepEqual(actualSvc.Spec.Ports, expectedSvc.Spec.Ports) {
log.Info(fmt.Sprintf("Ports of Service %s do not match the expected state, hence updating it", actualSvc.Name))
actualSvc.Spec.Ports = expectedSvc.Spec.Ports
return r.Client.Create(ctx, actualSvc)
if err := r.Client.Update(ctx, actualSvc); err != nil {
log.Error(err, "Error updating Ports of Service", "Name", actualSvc.Name)
return err
}
return nil
Comment thread
jgwest marked this conversation as resolved.
Outdated
}

// Checks if user is using the Prometheus operator by checking CustomResourceDefinition for ServiceMonitor
Comment thread
jgwest marked this conversation as resolved.
smCRD := &crdv1.CustomResourceDefinition{
ObjectMeta: metav1.ObjectMeta{
Name: "servicemonitors.monitoring.coreos.com",
},
}

if err := fetchObject(ctx, r.Client, smCRD.Namespace, smCRD.Name, smCRD); err != nil {
if !apierrors.IsNotFound(err) {
return fmt.Errorf("failed to get the ServiceMonitor %s : %s", smCRD.Name, err)
}
return nil
}

// Create ServiceMonitor for Rollouts metrics
existingServiceMonitor := &monitoringv1.ServiceMonitor{}
if err := fetchObject(ctx, r.Client, cr.Namespace, actualSvc.Name, existingServiceMonitor); err != nil {
if apierrors.IsNotFound(err) {
err = r.createServiceMonitorIfAbsent(ctx, cr.Namespace, cr, actualSvc.Name, actualSvc.Name)
if err != nil {
return err
}
} else {
log.Error(err, "Error querying for ServiceMonitor", "Namespace", cr.Namespace, "Name", actualSvc.Name)
return err
}
} else {
log.Info("A ServiceMonitor instance already exists",
"Namespace", existingServiceMonitor.Namespace, "Name", existingServiceMonitor.Name)

// Check if existing ServiceMonitor matches expected content
if !serviceMonitorMatches(existingServiceMonitor, actualSvc.Name) {
log.Info("Updating existing ServiceMonitor instance",
"Namespace", existingServiceMonitor.Namespace, "Name", existingServiceMonitor.Name)

// Update ServiceMonitor with expected content
existingServiceMonitor.Spec.Selector.MatchLabels = map[string]string{
"app.kubernetes.io/name": actualSvc.Name,
}
existingServiceMonitor.Spec.Endpoints = []monitoringv1.Endpoint{
{
Port: "metrics",
},
}

if err := r.Client.Update(ctx, existingServiceMonitor); err != nil {
log.Error(err, "Error updating existing ServiceMonitor instance",
"Namespace", existingServiceMonitor.Namespace, "Name", existingServiceMonitor.Name)
return err
}
}
return nil
}
return nil
}

Expand Down Expand Up @@ -934,3 +997,62 @@ func GetAggregateToViewPolicyRules() []rbacv1.PolicyRule {
},
}
}

// createServiceMonitorIfAbsent creates a ServiceMonitor named 'name' in the given
// namespace. The ServiceMonitor selects the Rollouts metrics Service via its
// "app.kubernetes.io/name" label (set to 'serviceMonitorLabel') and scrapes the
// Service's "metrics" port. The RolloutManager is set as the owning controller so
// the ServiceMonitor is cleaned up when the RolloutManager is deleted.
// Returns any error from setting the owner reference or creating the resource.
func (r *RolloutManagerReconciler) createServiceMonitorIfAbsent(ctx context.Context, namespace string, rolloutManager rolloutsmanagerv1alpha1.RolloutManager, name, serviceMonitorLabel string) error {
	serviceMonitor := &monitoringv1.ServiceMonitor{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: monitoringv1.ServiceMonitorSpec{
			Selector: metav1.LabelSelector{
				MatchLabels: map[string]string{
					"app.kubernetes.io/name": serviceMonitorLabel,
				},
			},
			Endpoints: []monitoringv1.Endpoint{
				{
					Port: "metrics",
				},
			},
		},
	}
	log.Info("Creating a new ServiceMonitor instance",
		"Namespace", serviceMonitor.Namespace, "Name", serviceMonitor.Name)

	// Set the RolloutManager instance as the owner and controller
	if err := controllerutil.SetControllerReference(&rolloutManager, serviceMonitor, r.Scheme); err != nil {
		// The previous message ("Error setting read role owner ref") was copied from
		// elsewhere and did not describe this resource.
		log.Error(err, "Error setting owner reference on ServiceMonitor",
			"Namespace", serviceMonitor.Namespace, "Name", serviceMonitor.Name, "RolloutManager Name", rolloutManager.Name)
		return err
	}

	if err := r.Client.Create(ctx, serviceMonitor); err != nil {
		log.Error(err, "Error creating a new ServiceMonitor instance",
			"Namespace", serviceMonitor.Namespace, "Name", serviceMonitor.Name)
		return err
	}

	return nil
}

func serviceMonitorMatches(sm *monitoringv1.ServiceMonitor, matchLabel string) bool {
// Check if labels match
labels := sm.Spec.Selector.MatchLabels
if val, ok := labels["app.kubernetes.io/name"]; ok {
if val != matchLabel {
return false
}
} else {
return false
}

// Check if endpoints match
if sm.Spec.Endpoints[0].Port != "metrics" {
Comment thread
jgwest marked this conversation as resolved.
Outdated
return false
}

return true
}
172 changes: 172 additions & 0 deletions controllers/resources_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,10 +7,13 @@ import (
"github.com/argoproj-labs/argo-rollouts-manager/api/v1alpha1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
crdv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
Comment thread
jgwest marked this conversation as resolved.
Outdated
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
Expand Down Expand Up @@ -236,6 +239,15 @@ var _ = Describe("Resource creation and cleanup tests", func() {
},
},
},
{
fmt.Sprintf("ServiceMonitor %s", DefaultArgoRolloutsResourceName),
&monitoringv1.ServiceMonitor{
ObjectMeta: metav1.ObjectMeta{
Name: DefaultArgoRolloutsResourceName,
Namespace: a.Namespace,
},
},
},
}

for _, test := range tt {
Expand Down Expand Up @@ -266,4 +278,164 @@ var _ = Describe("Resource creation and cleanup tests", func() {
}
})

Context("Rollouts Metrics ServiceMonitor test", func() {
	var (
		ctx context.Context
		a   *v1alpha1.RolloutManager
		r   *RolloutManagerReconciler
		req reconcile.Request
	)

	// Common fixture: a RolloutManager in a fresh namespace, a reconciler backed
	// by a fake client, and a reconcile request targeting the RolloutManager.
	BeforeEach(func() {
		ctx = context.Background()
		a = makeTestRolloutManager()
		r = makeTestReconciler(a)
		err := createNamespace(r, a.Namespace)
		Expect(err).ToNot(HaveOccurred())
		req = reconcile.Request{
			NamespacedName: types.NamespacedName{
				Name:      a.Name,
				Namespace: a.Namespace,
			},
		}
	})

	It("Verify whether RolloutManager creating ServiceMonitor", func() {
		// The ServiceMonitor CRD must exist for the controller to create a ServiceMonitor.
		smCRD, existingSvc := serviceAndServiceMonitorCRD(req.Namespace)
		Expect(r.Client.Create(ctx, smCRD)).To(Succeed())
		Expect(r.Client.Create(ctx, existingSvc)).To(Succeed())

		res, err := r.Reconcile(ctx, req)
		Expect(err).ToNot(HaveOccurred())
		Expect(res.Requeue).Should(BeFalse(), "reconcile should not requeue request")

		expectedServiceMonitor := serviceMonitor()

		sm := &monitoringv1.ServiceMonitor{}
		Expect(r.Client.Get(ctx, types.NamespacedName{
			Name:      DefaultArgoRolloutsMetricsServiceName,
			Namespace: testNamespace,
		}, sm)).To(Succeed())

		Expect(sm.Name).To(Equal(expectedServiceMonitor.Name))
		Expect(sm.Namespace).To(Equal(expectedServiceMonitor.Namespace))
		Expect(sm.Spec).To(Equal(expectedServiceMonitor.Spec))
	})

	It("Verify if ServiceMonitor exists, but has different content than we expect then it should update ServiceMonitor", func() {
		smCRD, existingSvc := serviceAndServiceMonitorCRD(req.Namespace)
		Expect(r.Client.Create(ctx, smCRD)).To(Succeed())
		Expect(r.Client.Create(ctx, existingSvc)).To(Succeed())

		// Pre-create a ServiceMonitor with a drifted selector and port; reconcile
		// should bring it back to the expected content.
		existingServiceMonitor := &monitoringv1.ServiceMonitor{
			ObjectMeta: metav1.ObjectMeta{
				Name:      DefaultArgoRolloutsMetricsServiceName,
				Namespace: testNamespace,
			},
			Spec: monitoringv1.ServiceMonitorSpec{
				Selector: metav1.LabelSelector{
					MatchLabels: map[string]string{
						"app.kubernetes.io/name": "test-label",
					},
				},
				Endpoints: []monitoringv1.Endpoint{
					{
						Port: "metrics-test",
					},
				},
			},
		}

		Expect(r.Client.Create(ctx, existingServiceMonitor)).To(Succeed())

		res, err := r.Reconcile(ctx, req)
		Expect(err).ToNot(HaveOccurred())
		Expect(res.Requeue).Should(BeFalse(), "reconcile should not requeue request")

		expectedSM := serviceMonitor()

		Expect(r.Client.Get(ctx, types.NamespacedName{
			Name:      DefaultArgoRolloutsMetricsServiceName,
			Namespace: testNamespace,
		}, existingServiceMonitor)).To(Succeed())

		Expect(existingServiceMonitor.Name).To(Equal(expectedSM.Name))
		Expect(existingServiceMonitor.Namespace).To(Equal(expectedSM.Namespace))
		Expect(existingServiceMonitor.Spec).To(Equal(expectedSM.Spec))
	})

	It("Verify ServiceMonitor is not created if the CRD does not exist.", func() {
		// Only the Service is created here — without the ServiceMonitor CRD, the
		// controller should skip ServiceMonitor creation entirely.
		_, existingSvc := serviceAndServiceMonitorCRD(req.Namespace)
		Expect(r.Client.Create(ctx, existingSvc)).To(Succeed())

		res, err := r.Reconcile(ctx, req)
		Expect(err).ToNot(HaveOccurred())
		Expect(res.Requeue).Should(BeFalse(), "reconcile should not requeue request")

		sm := &monitoringv1.ServiceMonitor{}
		Expect(r.Client.Get(ctx, types.NamespacedName{
			Name:      DefaultArgoRolloutsMetricsServiceName,
			Namespace: testNamespace,
		}, sm)).ToNot(Succeed())
	})
})

})
Comment thread
jgwest marked this conversation as resolved.

// serviceMonitor returns the ServiceMonitor the controller is expected to create
// for the Rollouts metrics Service in the test namespace: it selects the Service
// by its "app.kubernetes.io/name" label and scrapes the "metrics" port.
func serviceMonitor() *monitoringv1.ServiceMonitor {
	return &monitoringv1.ServiceMonitor{
		ObjectMeta: metav1.ObjectMeta{
			Name:      DefaultArgoRolloutsMetricsServiceName,
			Namespace: testNamespace,
		},
		Spec: monitoringv1.ServiceMonitorSpec{
			Selector: metav1.LabelSelector{
				MatchLabels: map[string]string{
					"app.kubernetes.io/name": DefaultArgoRolloutsMetricsServiceName,
				},
			},
			Endpoints: []monitoringv1.Endpoint{{Port: "metrics"}},
		},
	}
}

// serviceAndServiceMonitorCRD builds the fixtures the ServiceMonitor tests share:
// a stub ServiceMonitor CustomResourceDefinition (whose presence signals that the
// Prometheus operator is installed) and the Rollouts metrics Service in the given
// namespace.
func serviceAndServiceMonitorCRD(namespace string) (*crdv1.CustomResourceDefinition, *corev1.Service) {
	crd := &crdv1.CustomResourceDefinition{
		ObjectMeta: metav1.ObjectMeta{
			Name: "servicemonitors.monitoring.coreos.com",
		},
	}

	svc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      DefaultArgoRolloutsMetricsServiceName,
			Namespace: namespace,
			Labels: map[string]string{
				"app.kubernetes.io/name":      DefaultArgoRolloutsResourceName,
				"app.kubernetes.io/component": "server",
			},
		},
		Spec: corev1.ServiceSpec{
			Ports: []corev1.ServicePort{{
				Name:       "metrics",
				Port:       8090,
				Protocol:   corev1.ProtocolTCP,
				TargetPort: intstr.FromInt(8090),
			}},
			Selector: map[string]string{
				DefaultRolloutsSelectorKey: DefaultArgoRolloutsResourceName,
			},
		},
	}

	return crd, svc
}
Loading