diff --git a/controlplane/kubeadm/controllers/alias.go b/controlplane/kubeadm/controllers/alias.go index 924a5d808550..0c9453ffd881 100644 --- a/controlplane/kubeadm/controllers/alias.go +++ b/controlplane/kubeadm/controllers/alias.go @@ -33,6 +33,7 @@ import ( // KubeadmControlPlaneReconciler reconciles a KubeadmControlPlane object. type KubeadmControlPlaneReconciler struct { Client client.Client + APIReader client.Reader SecretCachingClient client.Client RuntimeClient runtimeclient.Client ClusterCache clustercache.ClusterCache @@ -51,6 +52,7 @@ type KubeadmControlPlaneReconciler struct { func (r *KubeadmControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { return (&kubeadmcontrolplanecontrollers.KubeadmControlPlaneReconciler{ Client: r.Client, + APIReader: r.APIReader, SecretCachingClient: r.SecretCachingClient, RuntimeClient: r.RuntimeClient, ClusterCache: r.ClusterCache, diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go index fb1ec3aa22a3..7f738ca7e49c 100644 --- a/controlplane/kubeadm/internal/controllers/controller.go +++ b/controlplane/kubeadm/internal/controllers/controller.go @@ -48,7 +48,6 @@ import ( "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" runtimeclient "sigs.k8s.io/cluster-api/exp/runtime/client" "sigs.k8s.io/cluster-api/feature" - "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/internal/util/ssa" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/cache" @@ -66,6 +65,7 @@ import ( const ( kcpManagerName = "capi-kubeadmcontrolplane" + kcpMetadataManagerName = "capi-kubeadmcontrolplane-metadata" kubeadmControlPlaneKind = "KubeadmControlPlane" ) @@ -80,6 +80,7 @@ const ( // KubeadmControlPlaneReconciler reconciles a KubeadmControlPlane object. type KubeadmControlPlaneReconciler struct { Client client.Client + APIReader client.Reader SecretCachingClient client.Client RuntimeClient runtimeclient.Client controller controller.Controller @@ -106,6 +107,9 @@ type KubeadmControlPlaneReconciler struct { overridePreflightChecksFunc func(ctx context.Context, controlPlane *internal.ControlPlane, excludeFor ...*clusterv1.Machine) ctrl.Result overrideCanUpdateMachineFunc func(ctx context.Context, machine *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult) (bool, error) overrideCanExtensionsUpdateMachine func(ctx context.Context, machine *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult, extensionHandlers []string) (bool, []string, error) + // Note: This field is only used for unit tests that use fake client because the fake client does not properly set resourceVersion + // on BootstrapConfig/InfraMachine after ssa.Patch and then ssa.RemoveManagedFieldsForLabelsAndAnnotations would fail. + disableRemoveManagedFieldsForLabelsAndAnnotations bool } func (r *KubeadmControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { @@ -842,23 +846,21 @@ func (r *KubeadmControlPlaneReconciler) syncMachines(ctx context.Context, contro } patchHelpers[machineName] = patchHelper - labelsAndAnnotationsManagedFieldPaths := []contract.Path{ - {"f:metadata", "f:annotations"}, - {"f:metadata", "f:labels"}, - } infraMachine, infraMachineFound := controlPlane.InfraResources[machineName] // Only update the InfraMachine if it is already found, otherwise just skip it. // This could happen e.g. if the cache is not up-to-date yet. 
if infraMachineFound { - // Cleanup managed fields of all InfrastructureMachines to drop ownership of labels and annotations - // from "manager". We do this so that InfrastructureMachines that are created using the Create method - // can also work with SSA. Otherwise, labels and annotations would be co-owned by our "old" "manager" - // and "capi-kubeadmcontrolplane" and then we would not be able to e.g. drop labels and annotations. - if err := ssa.DropManagedFields(ctx, r.Client, infraMachine, kcpManagerName, labelsAndAnnotationsManagedFieldPaths); err != nil { + // Drop managedFields for manager:Update and capi-kubeadmcontrolplane:Apply for all objects created with CAPI <= v1.11. + // Starting with CAPI v1.12 we have a new managedField structure where capi-kubeadmcontrolplane-metadata will own + // labels and annotations and capi-kubeadmcontrolplane everything else. + // Note: We have to call ssa.MigrateManagedFields for every Machine created with CAPI <= v1.11 once. + // Given that this was introduced in CAPI v1.12 and our n-3 upgrade policy this can + // be removed with CAPI v1.15. + if err := ssa.MigrateManagedFields(ctx, r.Client, infraMachine, kcpManagerName, kcpMetadataManagerName); err != nil { return errors.Wrapf(err, "failed to clean up managedFields of InfrastructureMachine %s", klog.KObj(infraMachine)) } // Update in-place mutating fields on InfrastructureMachine. - if err := r.updateExternalObject(ctx, infraMachine, infraMachine.GroupVersionKind(), controlPlane.KCP, controlPlane.Cluster); err != nil { + if err := r.updateLabelsAndAnnotations(ctx, infraMachine, infraMachine.GroupVersionKind(), controlPlane.KCP, controlPlane.Cluster); err != nil { return errors.Wrapf(err, "failed to update InfrastructureMachine %s", klog.KObj(infraMachine)) } } @@ -867,15 +869,17 @@ func (r *KubeadmControlPlaneReconciler) syncMachines(ctx context.Context, contro // Only update the KubeadmConfig if it is already found, otherwise just skip it. // This could happen e.g. if the cache is not up-to-date yet. if kubeadmConfigFound { - // Cleanup managed fields of all KubeadmConfigs to drop ownership of labels and annotations - // from "manager". We do this so that KubeadmConfigs that are created using the Create method - // can also work with SSA. Otherwise, labels and annotations would be co-owned by our "old" "manager" - // and "capi-kubeadmcontrolplane" and then we would not be able to e.g. drop labels and annotations. - if err := ssa.DropManagedFields(ctx, r.Client, kubeadmConfig, kcpManagerName, labelsAndAnnotationsManagedFieldPaths); err != nil { + // Drop managedFields for manager:Update and capi-kubeadmcontrolplane:Apply for all objects created with CAPI <= v1.11. + // Starting with CAPI v1.12 we have a new managedField structure where capi-kubeadmcontrolplane-metadata will own + // labels and annotations and capi-kubeadmcontrolplane everything else. + // Note: We have to call ssa.MigrateManagedFields for every Machine created with CAPI <= v1.11 once. + // Given that this was introduced in CAPI v1.12 and our n-3 upgrade policy this can + // be removed with CAPI v1.15. + if err := ssa.MigrateManagedFields(ctx, r.Client, kubeadmConfig, kcpManagerName, kcpMetadataManagerName); err != nil { return errors.Wrapf(err, "failed to clean up managedFields of KubeadmConfig %s", klog.KObj(kubeadmConfig)) } // Update in-place mutating fields on BootstrapConfig. 
- if err := r.updateExternalObject(ctx, kubeadmConfig, bootstrapv1.GroupVersion.WithKind("KubeadmConfig"), controlPlane.KCP, controlPlane.Cluster); err != nil { + if err := r.updateLabelsAndAnnotations(ctx, kubeadmConfig, bootstrapv1.GroupVersion.WithKind("KubeadmConfig"), controlPlane.KCP, controlPlane.Cluster); err != nil { return errors.Wrapf(err, "failed to update KubeadmConfig %s", klog.KObj(kubeadmConfig)) } } @@ -1017,7 +1021,6 @@ func reconcileMachineUpToDateCondition(_ context.Context, controlPlane *internal Reason: clusterv1.MachineNotUpToDateReason, Message: message, }) - continue } conditions.Set(machine, metav1.Condition{ diff --git a/controlplane/kubeadm/internal/controllers/controller_test.go b/controlplane/kubeadm/internal/controllers/controller_test.go index c4ac15f19108..6f486ae4afc5 100644 --- a/controlplane/kubeadm/internal/controllers/controller_test.go +++ b/controlplane/kubeadm/internal/controllers/controller_test.go @@ -33,8 +33,10 @@ import ( "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/clientcmd" @@ -55,7 +57,6 @@ import ( "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/desiredstate" controlplanev1webhooks "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/webhooks" "sigs.k8s.io/cluster-api/feature" - "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/internal/util/ssa" "sigs.k8s.io/cluster-api/internal/webhooks" "sigs.k8s.io/cluster-api/util" @@ -69,6 +70,10 @@ import ( "sigs.k8s.io/cluster-api/util/test/builder" ) +const ( + timeout = time.Second * 30 +) + func TestClusterToKubeadmControlPlane(t *testing.T) { g := NewWithT(t) fakeClient := newFakeClient() @@ -1705,7 +1710,6 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { namespace, testCluster := setup(t, g) defer teardown(t, g, namespace, testCluster) - classicManager := "manager" duration5s := ptr.To(int32(5)) duration10s := ptr.To(int32(10)) @@ -1720,12 +1724,12 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { "metadata": map[string]interface{}{ "name": "existing-inframachine", "namespace": testCluster.Namespace, - "labels": map[string]string{ + "labels": map[string]interface{}{ "preserved-label": "preserved-value", "dropped-label": "dropped-value", "modified-label": "modified-value", }, - "annotations": map[string]string{ + "annotations": map[string]interface{}{ "preserved-annotation": "preserved-value", "dropped-annotation": "dropped-value", "modified-annotation": "modified-value", @@ -1739,8 +1743,6 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { Name: "existing-inframachine", APIGroup: clusterv1.GroupVersionInfrastructure.Group, } - // Note: use "manager" as the field owner to mimic the manager used before ClusterAPI v1.4.0. - g.Expect(env.Create(ctx, existingInfraMachine, client.FieldOwner("manager"))).To(Succeed()) // Existing KubeadmConfig bootstrapSpec := &bootstrapv1.KubeadmConfigSpec{ @@ -1772,15 +1774,13 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { Name: "existing-kubeadmconfig", APIGroup: bootstrapv1.GroupVersion.Group, } - // Note: use "manager" as the field owner to mimic the manager used before ClusterAPI v1.4.0. 
- g.Expect(env.Create(ctx, existingKubeadmConfig, client.FieldOwner("manager"))).To(Succeed()) // Existing Machine to validate in-place mutation fd := "fd1" inPlaceMutatingMachine := &clusterv1.Machine{ TypeMeta: metav1.TypeMeta{ - Kind: "Machine", APIVersion: clusterv1.GroupVersion.String(), + Kind: "Machine", }, ObjectMeta: metav1.ObjectMeta{ Name: "existing-machine", @@ -1812,7 +1812,6 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { }, }, } - g.Expect(env.PatchAndWait(ctx, inPlaceMutatingMachine, client.FieldOwner(kcpManagerName))).To(Succeed()) // Existing machine that is in deleting state deletingMachine := &clusterv1.Machine{ @@ -1835,7 +1834,11 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { Name: "inframachine", }, Bootstrap: clusterv1.Bootstrap{ - DataSecretName: ptr.To("machine-bootstrap-secret"), + ConfigRef: clusterv1.ContractVersionedObjectReference{ + Kind: "KubeadmConfig", + Name: "non-existing-kubeadmconfig", + APIGroup: bootstrapv1.GroupVersion.Group, + }, }, Deletion: clusterv1.MachineDeletionSpec{ NodeDrainTimeoutSeconds: duration5s, @@ -1845,16 +1848,6 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { ReadinessGates: desiredstate.MandatoryMachineReadinessGates, }, } - g.Expect(env.PatchAndWait(ctx, deletingMachine, client.FieldOwner(kcpManagerName))).To(Succeed()) - // Delete the machine to put it in the deleting state - g.Expect(env.Delete(ctx, deletingMachine)).To(Succeed()) - // Wait till the machine is marked for deletion - g.Eventually(func() bool { - if err := env.Get(ctx, client.ObjectKeyFromObject(deletingMachine), deletingMachine); err != nil { - return false - } - return !deletingMachine.DeletionTimestamp.IsZero() - }, 30*time.Second).Should(BeTrue()) // Existing machine that has a InfrastructureRef which does not exist. 
nilInfraMachineMachine := &clusterv1.Machine{ @@ -1877,12 +1870,14 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { Name: "inframachine", }, Bootstrap: clusterv1.Bootstrap{ - DataSecretName: ptr.To("machine-bootstrap-secret"), + ConfigRef: clusterv1.ContractVersionedObjectReference{ + Kind: "KubeadmConfig", + Name: "non-existing-kubeadmconfig", + APIGroup: bootstrapv1.GroupVersion.Group, + }, }, }, } - g.Expect(env.Create(ctx, nilInfraMachineMachine, client.FieldOwner(classicManager))).To(Succeed()) - // Delete the machine to put it in the deleting state kcp := &controlplanev1.KubeadmControlPlane{ TypeMeta: metav1.TypeMeta{ @@ -1926,6 +1921,32 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { }, } + // + // Create objects + // + + // Create InfraMachine (same as in createInfraMachine) + g.Expect(ssa.Patch(ctx, env.Client, kcpManagerName, existingInfraMachine)).To(Succeed()) + g.Expect(ssa.RemoveManagedFieldsForLabelsAndAnnotations(ctx, env.Client, env.GetAPIReader(), existingInfraMachine, kcpManagerName)).To(Succeed()) + + // Create KubeadmConfig (same as in createKubeadmConfig) + g.Expect(ssa.Patch(ctx, env.Client, kcpManagerName, existingKubeadmConfig)).To(Succeed()) + g.Expect(ssa.RemoveManagedFieldsForLabelsAndAnnotations(ctx, env.Client, env.GetAPIReader(), existingKubeadmConfig, kcpManagerName)).To(Succeed()) + + // Create Machines (same as in createMachine) + g.Expect(ssa.Patch(ctx, env.Client, kcpManagerName, inPlaceMutatingMachine)).To(Succeed()) + g.Expect(ssa.Patch(ctx, env.Client, kcpManagerName, deletingMachine)).To(Succeed()) + // Delete the machine to put it in the deleting state + g.Expect(env.Delete(ctx, deletingMachine)).To(Succeed()) + // Wait till the machine is marked for deletion + g.Eventually(func() bool { + if err := env.Get(ctx, client.ObjectKeyFromObject(deletingMachine), deletingMachine); err != nil { + return false + } + return !deletingMachine.DeletionTimestamp.IsZero() + }, timeout).Should(BeTrue()) + g.Expect(ssa.Patch(ctx, env.Client, kcpManagerName, nilInfraMachineMachine)).To(Succeed()) + controlPlane := &internal.ControlPlane{ KCP: kcp, Cluster: testCluster, @@ -1951,66 +1972,67 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { // Run syncMachines to clean up managed fields and have proper field ownership // for Machines, InfrastructureMachines and KubeadmConfigs. reconciler := &KubeadmControlPlaneReconciler{ - Client: env, + // Note: Ensure the fieldManager defaults to manager like in prod. + // Otherwise it defaults to the binary name which is not manager in tests. + Client: client.WithFieldOwner(env.Client, "manager"), SecretCachingClient: secretCachingClient, ssaCache: ssa.NewCache("test-controller"), } g.Expect(reconciler.syncMachines(ctx, controlPlane)).To(Succeed()) - // The inPlaceMutatingMachine should have cleaned up managed fields. 
- updatedInplaceMutatingMachine := inPlaceMutatingMachine.DeepCopy() - g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInplaceMutatingMachine), updatedInplaceMutatingMachine)).To(Succeed()) - // Verify ManagedFields - g.Expect(updatedInplaceMutatingMachine.ManagedFields).Should( - ContainElement(ssa.MatchManagedFieldsEntry(kcpManagerName, metav1.ManagedFieldsOperationApply)), - "in-place mutable machine should contain an entry for SSA manager", - ) - g.Expect(updatedInplaceMutatingMachine.ManagedFields).ShouldNot( - ContainElement(ssa.MatchManagedFieldsEntry(classicManager, metav1.ManagedFieldsOperationUpdate)), - "in-place mutable machine should not contain an entry for old manager", - ) + updatedInPlaceMutatingMachine := inPlaceMutatingMachine.DeepCopy() + g.Eventually(func(g Gomega) { + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInPlaceMutatingMachine), updatedInPlaceMutatingMachine)).To(Succeed()) + g.Expect(cleanupTime(updatedInPlaceMutatingMachine.ManagedFields)).To(ConsistOf(toManagedFields([]managedFieldEntry{{ + // capi-kubeadmcontrolplane owns almost everything. + Manager: kcpManagerName, + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: clusterv1.GroupVersion.String(), + FieldsV1: "{\"f:metadata\":{\"f:annotations\":{\"f:dropped-annotation\":{},\"f:modified-annotation\":{},\"f:pre-terminate.delete.hook.machine.cluster.x-k8s.io/kcp-cleanup\":{},\"f:preserved-annotation\":{}},\"f:labels\":{\"f:cluster.x-k8s.io/cluster-name\":{},\"f:cluster.x-k8s.io/control-plane\":{},\"f:cluster.x-k8s.io/control-plane-name\":{},\"f:dropped-label\":{},\"f:modified-label\":{},\"f:preserved-label\":{}},\"f:ownerReferences\":{\"k:{\\\"uid\\\":\\\"abc-123-control-plane\\\"}\":{}}},\"f:spec\":{\"f:bootstrap\":{\"f:configRef\":{\"f:apiGroup\":{},\"f:kind\":{},\"f:name\":{}}},\"f:clusterName\":{},\"f:deletion\":{\"f:nodeDeletionTimeoutSeconds\":{},\"f:nodeDrainTimeoutSeconds\":{},\"f:nodeVolumeDetachTimeoutSeconds\":{}},\"f:failureDomain\":{},\"f:infrastructureRef\":{\"f:apiGroup\":{},\"f:kind\":{},\"f:name\":{}},\"f:readinessGates\":{\"k:{\\\"conditionType\\\":\\\"APIServerPodHealthy\\\"}\":{\".\":{},\"f:conditionType\":{}},\"k:{\\\"conditionType\\\":\\\"ControllerManagerPodHealthy\\\"}\":{\".\":{},\"f:conditionType\":{}},\"k:{\\\"conditionType\\\":\\\"EtcdMemberHealthy\\\"}\":{\".\":{},\"f:conditionType\":{}},\"k:{\\\"conditionType\\\":\\\"EtcdPodHealthy\\\"}\":{\".\":{},\"f:conditionType\":{}},\"k:{\\\"conditionType\\\":\\\"SchedulerPodHealthy\\\"}\":{\".\":{},\"f:conditionType\":{}}},\"f:version\":{}}}", + }}))) + }, timeout).Should(Succeed()) - // The InfrastructureMachine should have ownership of "labels" and "annotations" transferred to - // "capi-kubeadmcontrolplane" manager. updatedInfraMachine := existingInfraMachine.DeepCopy() - g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInfraMachine), updatedInfraMachine)).To(Succeed()) + g.Eventually(func(g Gomega) { + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInfraMachine), updatedInfraMachine)).To(Succeed()) + g.Expect(cleanupTime(updatedInfraMachine.GetManagedFields())).To(ConsistOf(toManagedFields([]managedFieldEntry{{ + // capi-kubeadmcontrolplane-metadata owns labels and annotations. 
+ Manager: kcpMetadataManagerName, + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: updatedInfraMachine.GetAPIVersion(), + FieldsV1: "{\"f:metadata\":{\"f:annotations\":{\"f:dropped-annotation\":{},\"f:modified-annotation\":{},\"f:preserved-annotation\":{}},\"f:labels\":{\"f:cluster.x-k8s.io/cluster-name\":{},\"f:cluster.x-k8s.io/control-plane\":{},\"f:cluster.x-k8s.io/control-plane-name\":{},\"f:dropped-label\":{},\"f:modified-label\":{},\"f:preserved-label\":{}}}}", + }, { + // capi-kubeadmcontrolplane owns almost everything. + Manager: kcpManagerName, + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: updatedInfraMachine.GetAPIVersion(), + FieldsV1: "{\"f:spec\":{\"f:infra-field\":{}}}", + }}))) + }, timeout).Should(Succeed()) - // Verify ManagedFields - g.Expect(updatedInfraMachine.GetManagedFields()).Should( - ssa.MatchFieldOwnership(kcpManagerName, metav1.ManagedFieldsOperationApply, contract.Path{"f:metadata", "f:labels"})) - g.Expect(updatedInfraMachine.GetManagedFields()).Should( - ssa.MatchFieldOwnership(kcpManagerName, metav1.ManagedFieldsOperationApply, contract.Path{"f:metadata", "f:annotations"})) - g.Expect(updatedInfraMachine.GetManagedFields()).ShouldNot( - ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:metadata", "f:labels"})) - g.Expect(updatedInfraMachine.GetManagedFields()).ShouldNot( - ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:metadata", "f:annotations"})) - // Verify ownership of other fields is not changed. - g.Expect(updatedInfraMachine.GetManagedFields()).Should( - ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:spec"})) - - // The KubeadmConfig should have ownership of "labels" and "annotations" transferred to - // "capi-kubeadmcontrolplane" manager. updatedKubeadmConfig := existingKubeadmConfig.DeepCopy() - g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedKubeadmConfig), updatedKubeadmConfig)).To(Succeed()) - - // Verify ManagedFields - g.Expect(updatedKubeadmConfig.GetManagedFields()).Should( - ssa.MatchFieldOwnership(kcpManagerName, metav1.ManagedFieldsOperationApply, contract.Path{"f:metadata", "f:labels"})) - g.Expect(updatedKubeadmConfig.GetManagedFields()).Should( - ssa.MatchFieldOwnership(kcpManagerName, metav1.ManagedFieldsOperationApply, contract.Path{"f:metadata", "f:annotations"})) - g.Expect(updatedKubeadmConfig.GetManagedFields()).ShouldNot( - ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:metadata", "f:labels"})) - g.Expect(updatedKubeadmConfig.GetManagedFields()).ShouldNot( - ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:metadata", "f:annotations"})) - // Verify ownership of other fields is not changed. - g.Expect(updatedKubeadmConfig.GetManagedFields()).Should( - ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:spec"})) + g.Eventually(func(g Gomega) { + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedKubeadmConfig), updatedKubeadmConfig)).To(Succeed()) + g.Expect(cleanupTime(updatedKubeadmConfig.GetManagedFields())).To(ConsistOf(toManagedFields([]managedFieldEntry{{ + // capi-kubeadmcontrolplane-metadata owns labels and annotations. 
+ Manager: kcpMetadataManagerName, + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: bootstrapv1.GroupVersion.String(), + FieldsV1: "{\"f:metadata\":{\"f:annotations\":{\"f:dropped-annotation\":{},\"f:modified-annotation\":{},\"f:preserved-annotation\":{}},\"f:labels\":{\"f:cluster.x-k8s.io/cluster-name\":{},\"f:cluster.x-k8s.io/control-plane\":{},\"f:cluster.x-k8s.io/control-plane-name\":{},\"f:dropped-label\":{},\"f:modified-label\":{},\"f:preserved-label\":{}}}}", + }, { + // capi-kubeadmcontrolplane owns almost everything. + Manager: kcpManagerName, + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: bootstrapv1.GroupVersion.String(), + FieldsV1: "{\"f:spec\":{\"f:users\":{}}}", + }}))) + }, timeout).Should(Succeed()) // // Verify In-place mutating fields // - // Update KCP and verify the in-place mutating fields are propagated. + // Update the KCP and verify the in-place mutating fields are propagated. kcp.Spec.MachineTemplate.ObjectMeta.Labels = map[string]string{ "preserved-label": "preserved-value", // Keep the label and value as is "modified-label": "modified-value-2", // Modify the value of the label @@ -2031,50 +2053,34 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { kcp.Spec.MachineTemplate.Spec.Deletion.NodeDrainTimeoutSeconds = duration10s kcp.Spec.MachineTemplate.Spec.Deletion.NodeDeletionTimeoutSeconds = duration10s kcp.Spec.MachineTemplate.Spec.Deletion.NodeVolumeDetachTimeoutSeconds = duration10s - - // Use the updated KCP. controlPlane.KCP = kcp g.Expect(reconciler.syncMachines(ctx, controlPlane)).To(Succeed()) // Verify in-place mutable fields are updated on the Machine. - updatedInplaceMutatingMachine = inPlaceMutatingMachine.DeepCopy() - g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInplaceMutatingMachine), updatedInplaceMutatingMachine)).To(Succeed()) - // Verify Labels - g.Expect(updatedInplaceMutatingMachine.Labels).Should(Equal(expectedLabels)) - // Verify Annotations + updatedInPlaceMutatingMachine = inPlaceMutatingMachine.DeepCopy() + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInPlaceMutatingMachine), updatedInPlaceMutatingMachine)).To(Succeed()) + g.Expect(updatedInPlaceMutatingMachine.Labels).Should(Equal(expectedLabels)) expectedAnnotations := map[string]string{} for k, v := range kcp.Spec.MachineTemplate.ObjectMeta.Annotations { expectedAnnotations[k] = v } // The pre-terminate annotation should always be added expectedAnnotations[controlplanev1.PreTerminateHookCleanupAnnotation] = "" - g.Expect(updatedInplaceMutatingMachine.Annotations).Should(Equal(expectedAnnotations)) - // Verify Node timeout values - g.Expect(updatedInplaceMutatingMachine.Spec.Deletion.NodeDrainTimeoutSeconds).Should(And( - Not(BeNil()), - HaveValue(BeComparableTo(*kcp.Spec.MachineTemplate.Spec.Deletion.NodeDrainTimeoutSeconds)), - )) - g.Expect(updatedInplaceMutatingMachine.Spec.Deletion.NodeDeletionTimeoutSeconds).Should(And( - Not(BeNil()), - HaveValue(BeComparableTo(*kcp.Spec.MachineTemplate.Spec.Deletion.NodeDeletionTimeoutSeconds)), - )) - g.Expect(updatedInplaceMutatingMachine.Spec.Deletion.NodeVolumeDetachTimeoutSeconds).Should(And( - Not(BeNil()), - HaveValue(BeComparableTo(*kcp.Spec.MachineTemplate.Spec.Deletion.NodeVolumeDetachTimeoutSeconds)), - )) + g.Expect(updatedInPlaceMutatingMachine.Annotations).Should(Equal(expectedAnnotations)) + 
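Dropping `dropped-label`/`dropped-annotation` works because server-side apply removes keys that the owning field manager stops listing: for the Machine that owner is `capi-kubeadmcontrolplane`, for the InfraMachine and KubeadmConfig checked just below it is `capi-kubeadmcontrolplane-metadata`. A rough sketch of the metadata-only apply that `updateLabelsAndAnnotations` performs for the latter two, using plain controller-runtime instead of the internal `ssa.Patch`/`ssa.WithCachingProxy` path; the function name and signature are illustrative only:

```go
package controllers

import (
	"context"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// applyMetadataOnly is a stripped-down illustration of what updateLabelsAndAnnotations does;
// the real code goes through the internal ssa.Patch helper and an SSA cache.
func applyMetadataOnly(ctx context.Context, c client.Client, gvk schema.GroupVersionKind, namespace, name string, uid types.UID, labels, annotations map[string]string) error {
	obj := &unstructured.Unstructured{}
	obj.SetGroupVersionKind(gvk)
	obj.SetNamespace(namespace)
	obj.SetName(name)
	// Setting the UID ensures the call updates the existing object and does not perform an
	// accidental create if the object was deleted in the meantime.
	obj.SetUID(uid)
	// These become the complete set of keys owned by the metadata manager; keys it owned
	// before but no longer lists (e.g. "dropped-label") are removed by the apiserver, as long
	// as no other manager co-owns them.
	obj.SetLabels(labels)
	obj.SetAnnotations(annotations)
	return c.Patch(ctx, obj, client.Apply,
		client.FieldOwner("capi-kubeadmcontrolplane-metadata"),
		client.ForceOwnership)
}
```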
g.Expect(updatedInPlaceMutatingMachine.Spec.Deletion.NodeDrainTimeoutSeconds).Should(Equal(kcp.Spec.MachineTemplate.Spec.Deletion.NodeDrainTimeoutSeconds)) + g.Expect(updatedInPlaceMutatingMachine.Spec.Deletion.NodeDeletionTimeoutSeconds).Should(Equal(kcp.Spec.MachineTemplate.Spec.Deletion.NodeDeletionTimeoutSeconds)) + g.Expect(updatedInPlaceMutatingMachine.Spec.Deletion.NodeVolumeDetachTimeoutSeconds).Should(Equal(kcp.Spec.MachineTemplate.Spec.Deletion.NodeVolumeDetachTimeoutSeconds)) // Verify that the non in-place mutating fields remain the same. - g.Expect(updatedInplaceMutatingMachine.Spec.FailureDomain).Should(Equal(inPlaceMutatingMachine.Spec.FailureDomain)) - g.Expect(updatedInplaceMutatingMachine.Spec.ProviderID).Should(Equal(inPlaceMutatingMachine.Spec.ProviderID)) - g.Expect(updatedInplaceMutatingMachine.Spec.Version).Should(Equal(inPlaceMutatingMachine.Spec.Version)) - g.Expect(updatedInplaceMutatingMachine.Spec.InfrastructureRef).Should(BeComparableTo(inPlaceMutatingMachine.Spec.InfrastructureRef)) - g.Expect(updatedInplaceMutatingMachine.Spec.Bootstrap).Should(BeComparableTo(inPlaceMutatingMachine.Spec.Bootstrap)) + g.Expect(updatedInPlaceMutatingMachine.Spec.FailureDomain).Should(Equal(inPlaceMutatingMachine.Spec.FailureDomain)) + g.Expect(updatedInPlaceMutatingMachine.Spec.ProviderID).Should(Equal(inPlaceMutatingMachine.Spec.ProviderID)) + g.Expect(updatedInPlaceMutatingMachine.Spec.Version).Should(Equal(inPlaceMutatingMachine.Spec.Version)) + g.Expect(updatedInPlaceMutatingMachine.Spec.InfrastructureRef).Should(BeComparableTo(inPlaceMutatingMachine.Spec.InfrastructureRef)) + g.Expect(updatedInPlaceMutatingMachine.Spec.Bootstrap).Should(BeComparableTo(inPlaceMutatingMachine.Spec.Bootstrap)) // Verify in-place mutable fields are updated on InfrastructureMachine updatedInfraMachine = existingInfraMachine.DeepCopy() g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInfraMachine), updatedInfraMachine)).To(Succeed()) - // Verify Labels g.Expect(updatedInfraMachine.GetLabels()).Should(Equal(expectedLabels)) - // Verify Annotations g.Expect(updatedInfraMachine.GetAnnotations()).Should(Equal(kcp.Spec.MachineTemplate.ObjectMeta.Annotations)) // Verify spec remains the same g.Expect(updatedInfraMachine.Object).Should(HaveKeyWithValue("spec", infraMachineSpec)) @@ -2082,31 +2088,35 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { // Verify in-place mutable fields are updated on the KubeadmConfig. updatedKubeadmConfig = existingKubeadmConfig.DeepCopy() g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedKubeadmConfig), updatedKubeadmConfig)).To(Succeed()) - // Verify Labels g.Expect(updatedKubeadmConfig.GetLabels()).Should(Equal(expectedLabels)) - // Verify Annotations g.Expect(updatedKubeadmConfig.GetAnnotations()).Should(Equal(kcp.Spec.MachineTemplate.ObjectMeta.Annotations)) // Verify spec remains the same g.Expect(updatedKubeadmConfig.Spec).Should(BeComparableTo(existingKubeadmConfig.Spec)) - // The deleting machine should not change. 
- updatedDeletingMachine := deletingMachine.DeepCopy() - g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedDeletingMachine), updatedDeletingMachine)).To(Succeed()) - // Verify ManagedFields - g.Expect(updatedDeletingMachine.ManagedFields).Should( - ContainElement(ssa.MatchManagedFieldsEntry(kcpManagerName, metav1.ManagedFieldsOperationApply)), - "deleting machine should contain an entry for SSA manager", - ) - g.Expect(updatedDeletingMachine.ManagedFields).ShouldNot( - ContainElement(ssa.MatchManagedFieldsEntry(classicManager, metav1.ManagedFieldsOperationUpdate)), - "deleting machine should not contain an entry for old manager", - ) + g.Eventually(func(g Gomega) { + updatedDeletingMachine := deletingMachine.DeepCopy() + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedDeletingMachine), updatedDeletingMachine)).To(Succeed()) + g.Expect(cleanupTime(updatedDeletingMachine.ManagedFields)).To(ConsistOf(toManagedFields([]managedFieldEntry{{ + // capi-kubeadmcontrolplane owns almost everything. + Manager: kcpManagerName, + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: clusterv1.GroupVersion.String(), + FieldsV1: "{\"f:metadata\":{\"f:finalizers\":{\"v:\\\"testing-finalizer\\\"\":{}}},\"f:spec\":{\"f:bootstrap\":{\"f:configRef\":{\"f:apiGroup\":{},\"f:kind\":{},\"f:name\":{}}},\"f:clusterName\":{},\"f:infrastructureRef\":{\"f:apiGroup\":{},\"f:kind\":{},\"f:name\":{}},\"f:readinessGates\":{\"k:{\\\"conditionType\\\":\\\"APIServerPodHealthy\\\"}\":{\".\":{},\"f:conditionType\":{}},\"k:{\\\"conditionType\\\":\\\"ControllerManagerPodHealthy\\\"}\":{\".\":{},\"f:conditionType\":{}},\"k:{\\\"conditionType\\\":\\\"SchedulerPodHealthy\\\"}\":{\".\":{},\"f:conditionType\":{}}}}}", + }, { + // capi-kubeadmcontrolplane owns the fields that are propagated in-place for deleting Machines in syncMachines via patchHelper. + Manager: "manager", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: clusterv1.GroupVersion.String(), + FieldsV1: "{\"f:spec\":{\"f:deletion\":{\"f:nodeDeletionTimeoutSeconds\":{},\"f:nodeDrainTimeoutSeconds\":{},\"f:nodeVolumeDetachTimeoutSeconds\":{}}}}", + }}))) + }, timeout).Should(Succeed()) - // Verify the machine labels and annotations are unchanged. - g.Expect(updatedDeletingMachine.Labels).Should(Equal(deletingMachine.Labels)) - g.Expect(updatedDeletingMachine.Annotations).Should(Equal(deletingMachine.Annotations)) - // Verify Node timeout values + // Verify in-place mutable fields are updated on the deleting Machine. + updatedDeletingMachine := deletingMachine.DeepCopy() + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedDeletingMachine), updatedDeletingMachine)).To(Succeed()) + g.Expect(updatedDeletingMachine.Labels).Should(Equal(deletingMachine.Labels)) // Not propagated to a deleting Machine + g.Expect(updatedDeletingMachine.Annotations).Should(Equal(deletingMachine.Annotations)) // Not propagated to a deleting Machine g.Expect(updatedDeletingMachine.Spec.Deletion.NodeDrainTimeoutSeconds).Should(Equal(kcp.Spec.MachineTemplate.Spec.Deletion.NodeDrainTimeoutSeconds)) g.Expect(updatedDeletingMachine.Spec.Deletion.NodeDeletionTimeoutSeconds).Should(Equal(kcp.Spec.MachineTemplate.Spec.Deletion.NodeDeletionTimeoutSeconds)) g.Expect(updatedDeletingMachine.Spec.Deletion.NodeVolumeDetachTimeoutSeconds).Should(Equal(kcp.Spec.MachineTemplate.Spec.Deletion.NodeVolumeDetachTimeoutSeconds)) @@ -3923,9 +3933,17 @@ func TestObjectsPendingDelete(t *testing.T) { // test utils. 
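One thing worth spelling out about the deleting-Machine expectations above: the second managedFields entry is attributed to the default `manager` field owner with `Operation: Update`, not to `capi-kubeadmcontrolplane`, because deleting Machines are only written through the patch helper (a plain patch, not a server-side apply), and the test wraps the client with `client.WithFieldOwner(env.Client, "manager")` so that owner matches the production binary name. A purely illustrative sketch of the two write paths and how they show up in managedFields (the helper name is made up, and it assumes a freshly constructed Machine that has not been read back from the apiserver):

```go
package controllers

import (
	"context"

	"k8s.io/utils/ptr"
	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// illustrateWriteAttribution shows why the deleting Machine ends up with one Apply entry for
// capi-kubeadmcontrolplane and one Update entry for "manager".
func illustrateWriteAttribution(ctx context.Context, base client.Client, machine *clusterv1.Machine) error {
	// Non-SSA writes are attributed to the client's default field owner.
	c := client.WithFieldOwner(base, "manager")

	// TypeMeta has to be set for an apply patch on a typed object (same reason ssa.Patch requires it).
	machine.SetGroupVersionKind(clusterv1.GroupVersion.WithKind("Machine"))

	// Recorded in managedFields as {Manager: "capi-kubeadmcontrolplane", Operation: Apply}.
	if err := c.Patch(ctx, machine, client.Apply,
		client.FieldOwner(kcpManagerName), client.ForceOwnership); err != nil {
		return err
	}

	// A plain update that changes spec.deletion.* is recorded as {Manager: "manager", Operation: Update}.
	// Deleting Machines only take this path in syncMachines (via the patch helper), which is where
	// the second entry asserted above comes from.
	machine.Spec.Deletion.NodeDrainTimeoutSeconds = ptr.To(int32(10))
	return c.Update(ctx, machine)
}
```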
func newFakeClient(initObjs ...client.Object) client.WithWatch { + // Use a new scheme to avoid side effects if multiple tests are sharing the same global scheme. + scheme := runtime.NewScheme() + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + _ = apiextensionsv1.AddToScheme(scheme) + _ = clusterv1.AddToScheme(scheme) + _ = bootstrapv1.AddToScheme(scheme) + _ = controlplanev1.AddToScheme(scheme) return &fakeClient{ startTime: time.Now(), - WithWatch: fake.NewClientBuilder().WithObjects(initObjs...).WithStatusSubresource(&controlplanev1.KubeadmControlPlane{}).Build(), + WithWatch: fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithStatusSubresource(&controlplanev1.KubeadmControlPlane{}).Build(), } } diff --git a/controlplane/kubeadm/internal/controllers/helpers.go b/controlplane/kubeadm/internal/controllers/helpers.go index 7a22da80aecb..54865dd3d25f 100644 --- a/controlplane/kubeadm/internal/controllers/helpers.go +++ b/controlplane/kubeadm/internal/controllers/helpers.go @@ -234,10 +234,23 @@ func (r *KubeadmControlPlaneReconciler) createInfraMachine(ctx context.Context, return nil, clusterv1.ContractVersionedObjectReference{}, errors.Wrapf(err, "failed to create InfraMachine") } - if err := r.Client.Create(ctx, infraMachine); err != nil { + // Create the full object with capi-kubeadmcontrolplane. + // Below ssa.RemoveManagedFieldsForLabelsAndAnnotations will drop ownership for labels and annotations + // so that in a subsequent syncMachines call capi-kubeadmcontrolplane-metadata can take ownership for them. + // Note: This is done in way that it does not rely on managedFields being stored in the cache, so we can optimize + // memory usage by dropping managedFields before storing objects in the cache. + if err := ssa.Patch(ctx, r.Client, kcpManagerName, infraMachine); err != nil { return nil, clusterv1.ContractVersionedObjectReference{}, errors.Wrapf(err, "failed to create InfraMachine") } + // Note: This field is only used for unit tests that use fake client because the fake client does not properly set resourceVersion + // on KubeadmConfig/InfraMachine after ssa.Patch and then ssa.RemoveManagedFieldsForLabelsAndAnnotations would fail. + if !r.disableRemoveManagedFieldsForLabelsAndAnnotations { + if err := ssa.RemoveManagedFieldsForLabelsAndAnnotations(ctx, r.Client, r.APIReader, infraMachine, kcpManagerName); err != nil { + return nil, clusterv1.ContractVersionedObjectReference{}, errors.Wrapf(err, "failed to create InfraMachine") + } + } + return infraMachine, clusterv1.ContractVersionedObjectReference{ APIGroup: infraMachine.GroupVersionKind().Group, Kind: infraMachine.GetKind(), @@ -251,10 +264,23 @@ func (r *KubeadmControlPlaneReconciler) createKubeadmConfig(ctx context.Context, return nil, clusterv1.ContractVersionedObjectReference{}, errors.Wrapf(err, "failed to create KubeadmConfig") } - if err := r.Client.Create(ctx, kubeadmConfig); err != nil { + // Create the full object with capi-kubeadmcontrolplane. + // Below ssa.RemoveManagedFieldsForLabelsAndAnnotations will drop ownership for labels and annotations + // so that in a subsequent syncMachines call capi-kubeadmcontrolplane-metadata can take ownership for them. + // Note: This is done in way that it does not rely on managedFields being stored in the cache, so we can optimize + // memory usage by dropping managedFields before storing objects in the cache. 
+ if err := ssa.Patch(ctx, r.Client, kcpManagerName, kubeadmConfig); err != nil { return nil, clusterv1.ContractVersionedObjectReference{}, errors.Wrapf(err, "failed to create KubeadmConfig") } + // Note: This field is only used for unit tests that use fake client because the fake client does not properly set resourceVersion + // on KubeadmConfig/InfraMachine after ssa.Patch and then ssa.RemoveManagedFieldsForLabelsAndAnnotations would fail. + if !r.disableRemoveManagedFieldsForLabelsAndAnnotations { + if err := ssa.RemoveManagedFieldsForLabelsAndAnnotations(ctx, r.Client, r.APIReader, kubeadmConfig, kcpManagerName); err != nil { + return nil, clusterv1.ContractVersionedObjectReference{}, errors.Wrapf(err, "failed to create KubeadmConfig") + } + } + return kubeadmConfig, clusterv1.ContractVersionedObjectReference{ APIGroup: bootstrapv1.GroupVersion.Group, Kind: "KubeadmConfig", @@ -262,8 +288,8 @@ func (r *KubeadmControlPlaneReconciler) createKubeadmConfig(ctx context.Context, }, nil } -// updateExternalObject updates the external object with the labels and annotations from KCP. -func (r *KubeadmControlPlaneReconciler) updateExternalObject(ctx context.Context, obj client.Object, objGVK schema.GroupVersionKind, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster) error { +// updateLabelsAndAnnotations updates the external object with the labels and annotations from KCP. +func (r *KubeadmControlPlaneReconciler) updateLabelsAndAnnotations(ctx context.Context, obj client.Object, objGVK schema.GroupVersionKind, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster) error { updatedObject := &unstructured.Unstructured{} updatedObject.SetGroupVersionKind(objGVK) updatedObject.SetNamespace(obj.GetNamespace()) @@ -272,12 +298,10 @@ func (r *KubeadmControlPlaneReconciler) updateExternalObject(ctx context.Context // and does not perform an accidental create. updatedObject.SetUID(obj.GetUID()) - // Update labels updatedObject.SetLabels(desiredstate.ControlPlaneMachineLabels(kcp, cluster.Name)) - // Update annotations updatedObject.SetAnnotations(desiredstate.ControlPlaneMachineAnnotations(kcp)) - return ssa.Patch(ctx, r.Client, kcpManagerName, updatedObject, ssa.WithCachingProxy{Cache: r.ssaCache, Original: obj}) + return ssa.Patch(ctx, r.Client, kcpMetadataManagerName, updatedObject, ssa.WithCachingProxy{Cache: r.ssaCache, Original: obj}) } func (r *KubeadmControlPlaneReconciler) createMachine(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, machine *clusterv1.Machine) error { diff --git a/controlplane/kubeadm/internal/controllers/helpers_test.go b/controlplane/kubeadm/internal/controllers/helpers_test.go index cfd3a3b0ec05..595626bcfea2 100644 --- a/controlplane/kubeadm/internal/controllers/helpers_test.go +++ b/controlplane/kubeadm/internal/controllers/helpers_test.go @@ -18,6 +18,7 @@ package controllers import ( "context" + "strings" "testing" . 
"github.com/onsi/gomega" @@ -38,6 +39,8 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/external" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" + "sigs.k8s.io/cluster-api/internal/util/ssa" + "sigs.k8s.io/cluster-api/util/collections" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/kubeconfig" "sigs.k8s.io/cluster-api/util/secret" @@ -296,7 +299,7 @@ func TestKubeadmControlPlaneReconciler_reconcileKubeconfig(t *testing.T) { g.Expect(kubeconfigSecret.Labels).To(HaveKeyWithValue(clusterv1.ClusterNameLabel, cluster.Name)) } -func TestCloneConfigsAndGenerateMachine(t *testing.T) { +func TestCloneConfigsAndGenerateMachineAndSyncMachines(t *testing.T) { setup := func(t *testing.T, g *WithT) *corev1.Namespace { t.Helper() @@ -383,6 +386,7 @@ func TestCloneConfigsAndGenerateMachine(t *testing.T) { r := &KubeadmControlPlaneReconciler{ Client: env, SecretCachingClient: secretCachingClient, + ssaCache: ssa.NewCache("test-controller"), recorder: record.NewFakeRecorder(32), } @@ -393,44 +397,158 @@ func TestCloneConfigsAndGenerateMachine(t *testing.T) { g.Expect(env.GetAPIReader().List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed()) g.Expect(machineList.Items).To(HaveLen(1)) - for i := range machineList.Items { - m := machineList.Items[i] - g.Expect(m.Namespace).To(Equal(cluster.Namespace)) - g.Expect(m.Name).NotTo(BeEmpty()) - g.Expect(m.Name).To(HavePrefix(kcp.Name + namingTemplateKey)) - g.Expect(m.Spec.InfrastructureRef.Name).To(Equal(m.Name)) - g.Expect(m.Spec.InfrastructureRef.APIGroup).To(Equal(genericInfrastructureMachineTemplate.GroupVersionKind().Group)) - g.Expect(m.Spec.InfrastructureRef.Kind).To(Equal("GenericInfrastructureMachine")) + m := machineList.Items[0] + g.Expect(m.Namespace).To(Equal(cluster.Namespace)) + g.Expect(m.Name).NotTo(BeEmpty()) + g.Expect(m.Name).To(HavePrefix(kcp.Name + namingTemplateKey)) + g.Expect(m.Spec.InfrastructureRef.Name).To(Equal(m.Name)) + g.Expect(m.Spec.InfrastructureRef.APIGroup).To(Equal(genericInfrastructureMachineTemplate.GroupVersionKind().Group)) + g.Expect(m.Spec.InfrastructureRef.Kind).To(Equal("GenericInfrastructureMachine")) - g.Expect(m.Spec.Bootstrap.ConfigRef.Name).To(Equal(m.Name)) - g.Expect(m.Spec.Bootstrap.ConfigRef.APIGroup).To(Equal(bootstrapv1.GroupVersion.Group)) - g.Expect(m.Spec.Bootstrap.ConfigRef.Kind).To(Equal("KubeadmConfig")) + g.Expect(m.Spec.Bootstrap.ConfigRef.Name).To(Equal(m.Name)) + g.Expect(m.Spec.Bootstrap.ConfigRef.APIGroup).To(Equal(bootstrapv1.GroupVersion.Group)) + g.Expect(m.Spec.Bootstrap.ConfigRef.Kind).To(Equal("KubeadmConfig")) - infraObj, err := external.GetObjectFromContractVersionedRef(ctx, env.GetAPIReader(), m.Spec.InfrastructureRef, m.Namespace) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(infraObj.GetOwnerReferences()).To(HaveLen(1)) - g.Expect(infraObj.GetOwnerReferences()).To(ContainElement(metav1.OwnerReference{ - APIVersion: controlplanev1.GroupVersion.String(), - Kind: "KubeadmControlPlane", - Name: kcp.Name, - UID: kcp.UID, - })) - g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromNameAnnotation, genericInfrastructureMachineTemplate.GetName())) - g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromGroupKindAnnotation, genericInfrastructureMachineTemplate.GroupVersionKind().GroupKind().String())) - - kubeadmConfig := &bootstrapv1.KubeadmConfig{} - err = 
env.GetAPIReader().Get(ctx, client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name}, kubeadmConfig) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(kubeadmConfig.OwnerReferences).To(HaveLen(1)) - g.Expect(kubeadmConfig.OwnerReferences).To(ContainElement(metav1.OwnerReference{ - Kind: "KubeadmControlPlane", - APIVersion: controlplanev1.GroupVersion.String(), - Name: kcp.Name, - UID: kcp.UID, - })) - g.Expect(kubeadmConfig.Spec.InitConfiguration).To(BeComparableTo(bootstrapv1.InitConfiguration{})) - g.Expect(kubeadmConfig.Spec.JoinConfiguration).To(BeComparableTo(kcp.Spec.KubeadmConfigSpec.JoinConfiguration)) - } + infraObj, err := external.GetObjectFromContractVersionedRef(ctx, env.GetAPIReader(), m.Spec.InfrastructureRef, m.Namespace) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(infraObj.GetOwnerReferences()).To(HaveLen(1)) + g.Expect(infraObj.GetOwnerReferences()).To(ContainElement(metav1.OwnerReference{ + APIVersion: controlplanev1.GroupVersion.String(), + Kind: "KubeadmControlPlane", + Name: kcp.Name, + UID: kcp.UID, + })) + g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromNameAnnotation, genericInfrastructureMachineTemplate.GetName())) + g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromGroupKindAnnotation, genericInfrastructureMachineTemplate.GroupVersionKind().GroupKind().String())) + // Note: capi-kubeadmcontrolplane should own ownerReferences and spec, labels and annotations should be orphaned. + // Labels and annotations will be owned by capi-kubeadmcontrolplane-metadata after the next update + // of labels and annotations. + g.Expect(cleanupTime(infraObj.GetManagedFields())).To(ConsistOf(toManagedFields([]managedFieldEntry{{ + APIVersion: infraObj.GetAPIVersion(), + Manager: kcpManagerName, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:ownerReferences":{ + "k:{\"uid\":\"abc-123-kcp-control-plane\"}":{} + } +}, +"f:spec":{ + "f:hello":{} +}}`, + }}))) + + kubeadmConfig := &bootstrapv1.KubeadmConfig{} + err = env.GetAPIReader().Get(ctx, client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name}, kubeadmConfig) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(kubeadmConfig.OwnerReferences).To(HaveLen(1)) + g.Expect(kubeadmConfig.OwnerReferences).To(ContainElement(metav1.OwnerReference{ + Kind: "KubeadmControlPlane", + APIVersion: controlplanev1.GroupVersion.String(), + Name: kcp.Name, + UID: kcp.UID, + })) + g.Expect(kubeadmConfig.Spec.InitConfiguration).To(BeComparableTo(bootstrapv1.InitConfiguration{})) + g.Expect(kubeadmConfig.Spec.JoinConfiguration).To(BeComparableTo(kcp.Spec.KubeadmConfigSpec.JoinConfiguration)) + // Note: capi-kubeadmcontrolplane should own ownerReferences and spec, labels and annotations should be orphaned. + // Labels and annotations will be owned by capi-kubeadmcontrolplane-metadata after the next update + // of labels and annotations. 
+ g.Expect(cleanupTime(kubeadmConfig.GetManagedFields())).To(ConsistOf(toManagedFields([]managedFieldEntry{{ + APIVersion: bootstrapv1.GroupVersion.String(), + Manager: kcpManagerName, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:ownerReferences":{ + "k:{\"uid\":\"abc-123-kcp-control-plane\"}":{} + } +}, +"f:spec":{ + "f:joinConfiguration":{ + "f:nodeRegistration":{ + "f:kubeletExtraArgs":{ + "k:{\"name\":\"v\",\"value\":\"8\"}":{ + ".":{},"f:name":{},"f:value":{}} + } + } + } +}}`, + }}))) + + // Sync Machines + controlPlane, err := internal.NewControlPlane(ctx, r.managementCluster, r.Client, cluster, kcp, collections.FromMachines(&m)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(r.syncMachines(ctx, controlPlane)).To(Succeed()) + + // Verify managedFields again. + infraObj, err = external.GetObjectFromContractVersionedRef(ctx, env.GetAPIReader(), m.Spec.InfrastructureRef, m.Namespace) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cleanupTime(infraObj.GetManagedFields())).To(ConsistOf(toManagedFields([]managedFieldEntry{{ + // capi-kubeadmcontrolplane-metadata owns labels and annotations + APIVersion: infraObj.GetAPIVersion(), + Manager: kcpMetadataManagerName, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:annotations":{}, + "f:labels":{ + "f:cluster.x-k8s.io/cluster-name":{}, + "f:cluster.x-k8s.io/control-plane":{}, + "f:cluster.x-k8s.io/control-plane-name":{} + } +}}`, + }, { + // capi-kubeadmcontrolplane owns ownerReferences and spec + APIVersion: infraObj.GetAPIVersion(), + Manager: kcpManagerName, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:ownerReferences":{ + "k:{\"uid\":\"abc-123-kcp-control-plane\"}":{} + } +}, +"f:spec":{ + "f:hello":{} +}}`, + }}))) + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name}, kubeadmConfig)).To(Succeed()) + g.Expect(cleanupTime(kubeadmConfig.GetManagedFields())).To(ConsistOf(toManagedFields([]managedFieldEntry{{ + // capi-kubeadmcontrolplane-metadata owns labels and annotations + APIVersion: bootstrapv1.GroupVersion.String(), + Manager: kcpMetadataManagerName, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:annotations":{}, + "f:labels":{ + "f:cluster.x-k8s.io/cluster-name":{}, + "f:cluster.x-k8s.io/control-plane":{}, + "f:cluster.x-k8s.io/control-plane-name":{} + } +}}`, + }, { + // capi-kubeadmcontrolplane owns ownerReferences and spec + APIVersion: bootstrapv1.GroupVersion.String(), + Manager: kcpManagerName, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:ownerReferences":{ + "k:{\"uid\":\"abc-123-kcp-control-plane\"}":{} + } +}, +"f:spec":{ + "f:joinConfiguration":{ + "f:nodeRegistration":{ + "f:kubeletExtraArgs":{ + "k:{\"name\":\"v\",\"value\":\"8\"}":{ + ".":{},"f:name":{},"f:value":{}} + } + } + } +}}`, + }}))) } func TestCloneConfigsAndGenerateMachineFailInfraMachineCreation(t *testing.T) { @@ -507,7 +625,7 @@ func TestCloneConfigsAndGenerateMachineFailInfraMachineCreation(t *testing.T) { infraMachineList.SetGroupVersionKind(schema.GroupVersionKind{ Group: builder.InfrastructureGroupVersion.Group, Version: builder.InfrastructureGroupVersion.Version, - Kind: builder.GenericInfrastructureMachineKind, + Kind: builder.GenericInfrastructureMachineKind + "List", }) g.Expect(fakeClient.List(ctx, infraMachineList, client.InNamespace(cluster.Namespace))).To(Succeed()) 
g.Expect(infraMachineList.Items).To(BeEmpty()) @@ -569,6 +687,9 @@ func TestCloneConfigsAndGenerateMachineFailKubeadmConfigCreation(t *testing.T) { Client: fakeClient, SecretCachingClient: fakeClient, recorder: record.NewFakeRecorder(32), + // Note: This field is only used for unit tests that use fake client because the fake client does not properly set resourceVersion + // on BootstrapConfig/InfraMachine after ssa.Patch and then ssa.RemoveManagedFieldsForLabelsAndAnnotations would fail. + disableRemoveManagedFieldsForLabelsAndAnnotations: true, } // Break KubeadmConfig computation @@ -590,7 +711,7 @@ func TestCloneConfigsAndGenerateMachineFailKubeadmConfigCreation(t *testing.T) { infraMachineList.SetGroupVersionKind(schema.GroupVersionKind{ Group: builder.InfrastructureGroupVersion.Group, Version: builder.InfrastructureGroupVersion.Version, - Kind: builder.GenericInfrastructureMachineKind, + Kind: builder.GenericInfrastructureMachineKind + "List", }) g.Expect(fakeClient.List(ctx, infraMachineList, client.InNamespace(cluster.Namespace))).To(Succeed()) g.Expect(infraMachineList.Items).To(BeEmpty()) @@ -665,6 +786,9 @@ func TestCloneConfigsAndGenerateMachineFailMachineCreation(t *testing.T) { Client: fakeClient, SecretCachingClient: fakeClient, recorder: record.NewFakeRecorder(32), + // Note: This field is only used for unit tests that use fake client because the fake client does not properly set resourceVersion + // on BootstrapConfig/InfraMachine after ssa.Patch and then ssa.RemoveManagedFieldsForLabelsAndAnnotations would fail. + disableRemoveManagedFieldsForLabelsAndAnnotations: true, } _, err := r.cloneConfigsAndGenerateMachine(ctx, cluster, kcp, true, "") @@ -684,7 +808,7 @@ func TestCloneConfigsAndGenerateMachineFailMachineCreation(t *testing.T) { infraMachineList.SetGroupVersionKind(schema.GroupVersionKind{ Group: builder.InfrastructureGroupVersion.Group, Version: builder.InfrastructureGroupVersion.Version, - Kind: builder.GenericInfrastructureMachineKind, + Kind: builder.GenericInfrastructureMachineKind + "List", }) g.Expect(fakeClient.List(ctx, infraMachineList, client.InNamespace(cluster.Namespace))).To(Succeed()) g.Expect(infraMachineList.Items).To(BeEmpty()) @@ -790,3 +914,39 @@ func TestKubeadmControlPlaneReconciler_adoptKubeconfigSecret(t *testing.T) { }) } } + +func cleanupTime(fields []metav1.ManagedFieldsEntry) []metav1.ManagedFieldsEntry { + for i := range fields { + fields[i].Time = nil + } + return fields +} + +type managedFieldEntry struct { + Manager string + Operation metav1.ManagedFieldsOperationType + APIVersion string + FieldsV1 string + Subresource string +} + +func toManagedFields(managedFields []managedFieldEntry) []metav1.ManagedFieldsEntry { + res := []metav1.ManagedFieldsEntry{} + for _, f := range managedFields { + res = append(res, metav1.ManagedFieldsEntry{ + Manager: f.Manager, + Operation: f.Operation, + APIVersion: f.APIVersion, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(trimSpaces(f.FieldsV1))}, + Subresource: f.Subresource, + }) + } + return res +} + +func trimSpaces(s string) string { + s = strings.ReplaceAll(s, "\n", "") + s = strings.ReplaceAll(s, "\t", "") + return s +} diff --git a/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine.go b/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine.go index 246d63435eb7..deaa84ea8610 100644 --- a/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine.go +++ b/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine.go @@ -163,7 
+163,6 @@ func createRequest(ctx context.Context, c client.Client, currentMachine *cluster // InfraMachine // Note: Both currentInfraMachineForDiff and desiredInfraMachineForDiff need a dry-run to ensure changes // in defaulting logic and fields added by other controllers don't lead to an unintended diff. - // TODO(in-place) change the fieldManager to the one we use later when we trigger the in-place update. if err := ssa.Patch(ctx, c, kcpManagerName, currentInfraMachineForDiff, ssa.WithDryRun{}); err != nil { return nil, errors.Wrap(err, "server side apply dry-run failed for current InfraMachine") } diff --git a/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine_test.go b/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine_test.go index 52fe84b37e86..c2edb0e33a1d 100644 --- a/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine_test.go +++ b/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine_test.go @@ -42,6 +42,7 @@ import ( "sigs.k8s.io/cluster-api/feature" fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake" "sigs.k8s.io/cluster-api/internal/util/compare" + "sigs.k8s.io/cluster-api/internal/util/ssa" "sigs.k8s.io/cluster-api/util/test/builder" ) @@ -836,24 +837,31 @@ func Test_createRequest(t *testing.T) { ctx := t.Context() g := NewWithT(t) - // TODO(in-place) change the fieldManagers to the correct ones later after the SSA refactoring + // Create Machine (same as in createMachine) currentMachineForPatch := tt.currentMachine.DeepCopy() - currentMachineForPatch.SetGroupVersionKind(clusterv1.GroupVersion.WithKind("Machine")) // Has to be set for env.PatchAndWait - g.Expect(env.PatchAndWait(ctx, currentMachineForPatch, client.ForceOwnership, client.FieldOwner(kcpManagerName))).To(Succeed()) + currentMachineForPatch.SetGroupVersionKind(clusterv1.GroupVersion.WithKind("Machine")) // Has to be set for ssa.Patch + g.Expect(ssa.Patch(ctx, env.Client, kcpManagerName, currentMachineForPatch)).To(Succeed()) t.Cleanup(func() { g.Expect(env.CleanupAndWait(context.Background(), tt.currentMachine)).To(Succeed()) }) + + // Create InfraMachine (same as in createInfraMachine) currentInfraMachineForPatch := tt.currentInfraMachine.DeepCopy() - g.Expect(env.PatchAndWait(ctx, currentInfraMachineForPatch, client.ForceOwnership, client.FieldOwner(kcpManagerName))).To(Succeed()) + g.Expect(ssa.Patch(ctx, env.Client, kcpManagerName, currentInfraMachineForPatch)).To(Succeed()) + g.Expect(ssa.RemoveManagedFieldsForLabelsAndAnnotations(ctx, env.Client, env.GetAPIReader(), currentInfraMachineForPatch, kcpManagerName)).To(Succeed()) t.Cleanup(func() { g.Expect(env.CleanupAndWait(context.Background(), tt.currentInfraMachine)).To(Succeed()) }) + + // Create KubeadmConfig (same as in createKubeadmConfig) currentKubeadmConfigForPatch := tt.currentKubeadmConfig.DeepCopy() - currentKubeadmConfigForPatch.SetGroupVersionKind(bootstrapv1.GroupVersion.WithKind("KubeadmConfig")) // Has to be set for env.PatchAndWait - g.Expect(env.PatchAndWait(ctx, currentKubeadmConfigForPatch, client.ForceOwnership, client.FieldOwner(kcpManagerName))).To(Succeed()) + currentKubeadmConfigForPatch.SetGroupVersionKind(bootstrapv1.GroupVersion.WithKind("KubeadmConfig")) // Has to be set for ssa.Patch + g.Expect(ssa.Patch(ctx, env.Client, kcpManagerName, currentKubeadmConfigForPatch)).To(Succeed()) + g.Expect(ssa.RemoveManagedFieldsForLabelsAndAnnotations(ctx, env.Client, env.GetAPIReader(), currentKubeadmConfigForPatch, kcpManagerName)).To(Succeed()) 
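The `ssa.Patch` + `ssa.RemoveManagedFieldsForLabelsAndAnnotations` pair used here to mirror `createInfraMachine`/`createKubeadmConfig` is now repeated in controller_test.go as well; a small shared test helper could keep the setups aligned in one place. A sketch, assuming both ssa helpers accept a `client.Object` as their calls here suggest (the helper name is illustrative and not part of this change):

```go
package controllers

import (
	"context"

	"sigs.k8s.io/cluster-api/internal/util/ssa"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// createAsKCP creates obj the same way createInfraMachine/createKubeadmConfig do in production:
// a full server-side apply with the main field manager, followed by orphaning labels and
// annotations so that capi-kubeadmcontrolplane-metadata can take ownership of them on the
// next syncMachines.
// Note: obj must have its GroupVersionKind set (required for ssa.Patch).
func createAsKCP(ctx context.Context, c client.Client, apiReader client.Reader, obj client.Object) error {
	if err := ssa.Patch(ctx, c, kcpManagerName, obj); err != nil {
		return err
	}
	return ssa.RemoveManagedFieldsForLabelsAndAnnotations(ctx, c, apiReader, obj, kcpManagerName)
}
```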
t.Cleanup(func() { g.Expect(env.CleanupAndWait(context.Background(), tt.currentKubeadmConfig)).To(Succeed()) }) + if tt.modifyMachineAfterCreate != nil { g.Expect(tt.modifyMachineAfterCreate(ctx, env.Client, currentMachineForPatch)).To(Succeed()) } diff --git a/controlplane/kubeadm/main.go b/controlplane/kubeadm/main.go index b89bcddeda20..37df6cbfb7de 100644 --- a/controlplane/kubeadm/main.go +++ b/controlplane/kubeadm/main.go @@ -486,6 +486,7 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { if err := (&kubeadmcontrolplanecontrollers.KubeadmControlPlaneReconciler{ Client: mgr.GetClient(), + APIReader: mgr.GetAPIReader(), SecretCachingClient: secretCachingClient, ClusterCache: clusterCache, WatchFilterValue: watchFilterValue, diff --git a/internal/controllers/machineset/machineset_controller.go b/internal/controllers/machineset/machineset_controller.go index 044751a55198..0de38cf2ba79 100644 --- a/internal/controllers/machineset/machineset_controller.go +++ b/internal/controllers/machineset/machineset_controller.go @@ -79,7 +79,10 @@ var ( stateConfirmationInterval = 100 * time.Millisecond ) -const machineSetManagerName = "capi-machineset" +const ( + machineSetManagerName = "capi-machineset" + machineSetMetadataManagerName = "capi-machineset-metadata" +) // Update permissions on /finalizers subresrouce is required on management clusters with 'OwnerReferencesPermissionEnforcement' plugin enabled. // See: https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement @@ -102,6 +105,10 @@ type Reconciler struct { ssaCache ssa.Cache recorder record.EventRecorder + + // Note: This field is only used for unit tests that use fake client because the fake client does not properly set resourceVersion + // on BootstrapConfig/InfraMachine after ssa.Patch and then ssa.RemoveManagedFieldsForLabelsAndAnnotations would fail. + disableRemoveManagedFieldsForLabelsAndAnnotations bool } func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { @@ -561,19 +568,17 @@ func (r *Reconciler) syncMachines(ctx context.Context, s *scope) (ctrl.Result, e return ctrl.Result{}, errors.Wrapf(err, "failed to get InfrastructureMachine %s %s", updatedMachine.Spec.InfrastructureRef.Kind, klog.KRef(updatedMachine.Namespace, updatedMachine.Spec.InfrastructureRef.Name)) } - // Cleanup managed fields of all InfrastructureMachines to drop ownership of labels and annotations - // from "manager". We do this so that InfrastructureMachines that are created using the Create method - // can also work with SSA. Otherwise, labels and annotations would be co-owned by our "old" "manager" - // and "capi-machineset" and then we would not be able to e.g. drop labels and annotations. - labelsAndAnnotationsManagedFieldPaths := []contract.Path{ - {"f:metadata", "f:annotations"}, - {"f:metadata", "f:labels"}, - } - if err := ssa.DropManagedFields(ctx, r.Client, infraMachine, machineSetManagerName, labelsAndAnnotationsManagedFieldPaths); err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to update machine: failed to adjust the managedFields of the InfrastructureMachine %s", klog.KObj(infraMachine)) + // Drop managedFields for manager:Update and capi-machineset:Apply for all objects created with CAPI <= v1.11. + // Starting with CAPI v1.12 we have a new managedField structure where capi-machineset-metadata will own + // labels and annotations and capi-machineset everything else. 
+ // Note: We have to call ssa.MigrateManagedFields for every Machine created with CAPI <= v1.11 once. + // Given that this was introduced in CAPI v1.12 and our n-3 upgrade policy this can + // be removed with CAPI v1.15. + if err := ssa.MigrateManagedFields(ctx, r.Client, infraMachine, machineSetManagerName, machineSetMetadataManagerName); err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to clean up managedFields of InfrastructureMachine %s", klog.KObj(infraMachine)) } // Update in-place mutating fields on InfrastructureMachine. - if err := r.updateExternalObject(ctx, infraMachine, machineSet); err != nil { + if err := r.updateLabelsAndAnnotations(ctx, infraMachine, machineSet); err != nil { return ctrl.Result{}, errors.Wrapf(err, "failed to update InfrastructureMachine %s", klog.KObj(infraMachine)) } @@ -583,15 +588,17 @@ func (r *Reconciler) syncMachines(ctx context.Context, s *scope) (ctrl.Result, e return ctrl.Result{}, errors.Wrapf(err, "failed to get BootstrapConfig %s %s", updatedMachine.Spec.Bootstrap.ConfigRef.Kind, klog.KRef(updatedMachine.Namespace, updatedMachine.Spec.Bootstrap.ConfigRef.Name)) } - // Cleanup managed fields of all BootstrapConfigs to drop ownership of labels and annotations - // from "manager". We do this so that BootstrapConfigs that are created using the Create method - // can also work with SSA. Otherwise, labels and annotations would be co-owned by our "old" "manager" - // and "capi-machineset" and then we would not be able to e.g. drop labels and annotations. - if err := ssa.DropManagedFields(ctx, r.Client, bootstrapConfig, machineSetManagerName, labelsAndAnnotationsManagedFieldPaths); err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to update machine: failed to adjust the managedFields of the BootstrapConfig %s", klog.KObj(bootstrapConfig)) + // Drop managedFields for manager:Update and capi-machineset:Apply for all objects created with CAPI <= v1.11. + // Starting with CAPI v1.12 we have a new managedField structure where capi-machineset-metadata will own + // labels and annotations and capi-machineset everything else. + // Note: We have to call ssa.MigrateManagedFields for every Machine created with CAPI <= v1.11 once. + // Given that this was introduced in CAPI v1.12 and our n-3 upgrade policy this can + // be removed with CAPI v1.15. + if err := ssa.MigrateManagedFields(ctx, r.Client, bootstrapConfig, machineSetManagerName, machineSetMetadataManagerName); err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to clean up managedFields of BootstrapConfig %s", klog.KObj(bootstrapConfig)) } // Update in-place mutating fields on BootstrapConfig. - if err := r.updateExternalObject(ctx, bootstrapConfig, machineSet); err != nil { + if err := r.updateLabelsAndAnnotations(ctx, bootstrapConfig, machineSet); err != nil { return ctrl.Result{}, errors.Wrapf(err, "failed to update BootstrapConfig %s", klog.KObj(bootstrapConfig)) } } @@ -900,9 +907,9 @@ func (r *Reconciler) computeDesiredMachine(machineSet *clusterv1.MachineSet, exi return desiredMachine, nil } -// updateExternalObject updates the external object passed in with the +// updateLabelsAndAnnotations updates the external object passed in with the // updated labels and annotations from the MachineSet. 
-func (r *Reconciler) updateExternalObject(ctx context.Context, obj client.Object, machineSet *clusterv1.MachineSet) error { +func (r *Reconciler) updateLabelsAndAnnotations(ctx context.Context, obj client.Object, machineSet *clusterv1.MachineSet) error { updatedObject := &unstructured.Unstructured{} updatedObject.SetGroupVersionKind(obj.GetObjectKind().GroupVersionKind()) updatedObject.SetNamespace(obj.GetNamespace()) @@ -914,10 +921,7 @@ func (r *Reconciler) updateExternalObject(ctx context.Context, obj client.Object updatedObject.SetLabels(machineLabelsFromMachineSet(machineSet)) updatedObject.SetAnnotations(machineAnnotationsFromMachineSet(machineSet)) - if err := ssa.Patch(ctx, r.Client, machineSetManagerName, updatedObject, ssa.WithCachingProxy{Cache: r.ssaCache, Original: obj}); err != nil { - return errors.Wrapf(err, "failed to update %s", klog.KObj(obj)) - } - return nil + return ssa.Patch(ctx, r.Client, machineSetMetadataManagerName, updatedObject, ssa.WithCachingProxy{Cache: r.ssaCache, Original: obj}) } func (r *Reconciler) getOwnerMachineDeployment(ctx context.Context, machineSet *clusterv1.MachineSet) (*clusterv1.MachineDeployment, error) { @@ -1554,10 +1558,23 @@ func (r *Reconciler) createBootstrapConfig(ctx context.Context, ms *clusterv1.Ma return nil, clusterv1.ContractVersionedObjectReference{}, errors.Wrapf(err, "failed to create BootstrapConfig") } - if err := r.Client.Create(ctx, bootstrapConfig); err != nil { + // Create the full object with capi-machineset. + // Below ssa.RemoveManagedFieldsForLabelsAndAnnotations will drop ownership for labels and annotations + // so that in a subsequent syncMachines call capi-machineset-metadata can take ownership for them. + // Note: This is done in way that it does not rely on managedFields being stored in the cache, so we can optimize + // memory usage by dropping managedFields before storing objects in the cache. + if err := ssa.Patch(ctx, r.Client, machineSetManagerName, bootstrapConfig); err != nil { return nil, clusterv1.ContractVersionedObjectReference{}, errors.Wrapf(err, "failed to create BootstrapConfig") } + // Note: This field is only used for unit tests that use fake client because the fake client does not properly set resourceVersion + // on BootstrapConfig/InfraMachine after ssa.Patch and then ssa.RemoveManagedFieldsForLabelsAndAnnotations would fail. + if !r.disableRemoveManagedFieldsForLabelsAndAnnotations { + if err := ssa.RemoveManagedFieldsForLabelsAndAnnotations(ctx, r.Client, r.APIReader, bootstrapConfig, machineSetManagerName); err != nil { + return nil, clusterv1.ContractVersionedObjectReference{}, errors.Wrapf(err, "failed to create BootstrapConfig") + } + } + return bootstrapConfig, clusterv1.ContractVersionedObjectReference{ APIGroup: bootstrapConfig.GroupVersionKind().Group, Kind: bootstrapConfig.GetKind(), @@ -1611,10 +1628,23 @@ func (r *Reconciler) createInfraMachine(ctx context.Context, ms *clusterv1.Machi return nil, clusterv1.ContractVersionedObjectReference{}, errors.Wrapf(err, "failed to create InfraMachine") } - if err := r.Client.Create(ctx, infraMachine); err != nil { + // Create the full object with capi-machineset. + // Below ssa.RemoveManagedFieldsForLabelsAndAnnotations will drop ownership for labels and annotations + // so that in a subsequent syncMachines call capi-machineset-metadata can take ownership for them. 
+ // Note: This is done in way that it does not rely on managedFields being stored in the cache, so we can optimize + // memory usage by dropping managedFields before storing objects in the cache. + if err := ssa.Patch(ctx, r.Client, machineSetManagerName, infraMachine); err != nil { return nil, clusterv1.ContractVersionedObjectReference{}, errors.Wrapf(err, "failed to create InfraMachine") } + // Note: This field is only used for unit tests that use fake client because the fake client does not properly set resourceVersion + // on BootstrapConfig/InfraMachine after ssa.Patch and then ssa.RemoveManagedFieldsForLabelsAndAnnotations would fail. + if !r.disableRemoveManagedFieldsForLabelsAndAnnotations { + if err := ssa.RemoveManagedFieldsForLabelsAndAnnotations(ctx, r.Client, r.APIReader, infraMachine, machineSetManagerName); err != nil { + return nil, clusterv1.ContractVersionedObjectReference{}, errors.Wrapf(err, "failed to create InfraMachine") + } + } + return infraMachine, clusterv1.ContractVersionedObjectReference{ APIGroup: infraMachine.GroupVersionKind().Group, Kind: infraMachine.GetKind(), diff --git a/internal/controllers/machineset/machineset_controller_test.go b/internal/controllers/machineset/machineset_controller_test.go index 89e6f9554b66..62e22eb38263 100644 --- a/internal/controllers/machineset/machineset_controller_test.go +++ b/internal/controllers/machineset/machineset_controller_test.go @@ -26,6 +26,7 @@ import ( . "github.com/onsi/gomega" gomegatypes "github.com/onsi/gomega/types" + "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -120,7 +121,8 @@ func TestMachineSetReconciler(t *testing.T) { machineTemplateSpec := clusterv1.MachineTemplateSpec{ ObjectMeta: clusterv1.ObjectMeta{ Labels: map[string]string{ - "label-1": "true", + "label-1": "true", + "precedence": "MachineSet", }, Annotations: map[string]string{ "annotation-1": "true", @@ -207,6 +209,12 @@ func TestMachineSetReconciler(t *testing.T) { "annotations": map[string]interface{}{ "precedence": "GenericBootstrapConfig", }, + "labels": map[string]interface{}{ + "precedence": "GenericBootstrapConfig", + }, + }, + "spec": map[string]interface{}{ + "format": "cloud-init", }, } bootstrapTmpl := &unstructured.Unstructured{ @@ -230,6 +238,9 @@ func TestMachineSetReconciler(t *testing.T) { "annotations": map[string]interface{}{ "precedence": "GenericInfrastructureMachineTemplate", }, + "labels": map[string]interface{}{ + "precedence": "GenericInfrastructureMachineTemplate", + }, }, "spec": map[string]interface{}{ "size": "3xlarge", @@ -293,22 +304,67 @@ func TestMachineSetReconciler(t *testing.T) { } return len(machines.Items) }, timeout).Should(BeEquivalentTo(replicas)) + machinesMap := map[string]*clusterv1.Machine{} + for _, m := range machines.Items { + machinesMap[m.Name] = &m + } - t.Log("Creating a InfrastructureMachine for each Machine") + t.Log("Verify InfrastructureMachine for each Machine") infraMachines := &unstructured.UnstructuredList{} infraMachines.SetAPIVersion(clusterv1.GroupVersionInfrastructure.String()) infraMachines.SetKind("GenericInfrastructureMachine") - g.Eventually(func() int { - if err := env.List(ctx, infraMachines, client.InNamespace(namespace.Name)); err != nil { - return -1 + g.Eventually(func(g Gomega) { + g.Expect(env.List(ctx, infraMachines, client.InNamespace(namespace.Name))).To(Succeed()) + 
g.Expect(infraMachines.Items).To(HaveLen(int(replicas))) + for _, im := range infraMachines.Items { + g.Expect(machinesMap).To(HaveKey(im.GetName())) + g.Expect(im.GetAnnotations()).To(HaveKeyWithValue("annotation-1", "true"), "have annotations of MachineTemplate applied") + g.Expect(im.GetAnnotations()).To(HaveKeyWithValue("precedence", "MachineSet"), "the annotations from the MachineSpec template to overwrite the infrastructure template ones") + g.Expect(im.GetLabels()).To(HaveKeyWithValue("label-1", "true"), "have labels of MachineTemplate applied") + g.Expect(im.GetLabels()).To(HaveKeyWithValue("precedence", "MachineSet"), "the labels from the MachineSpec template to overwrite the infrastructure template ones") + g.Expect(cleanupTime(im.GetManagedFields())).To(ConsistOf(toManagedFields([]managedFieldEntry{{ + // capi-machineset-metadata owns labels and annotations. + APIVersion: im.GetAPIVersion(), + Manager: machineSetMetadataManagerName, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:annotations":{ + "f:annotation-1":{}, + "f:precedence":{} + }, + "f:labels":{ + "f:cluster.x-k8s.io/cluster-name":{}, + "f:cluster.x-k8s.io/deployment-name":{}, + "f:cluster.x-k8s.io/set-name":{}, + "f:label-1":{}, + "f:precedence":{} + } +}}`, + }, { + // capi-machineset owns spec. + APIVersion: im.GetAPIVersion(), + Manager: machineSetManagerName, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:spec":{ + "f:size":{} +}}`, + }, { + // Machine manager owns ownerReference. + APIVersion: im.GetAPIVersion(), + Manager: "manager", + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsV1: fmt.Sprintf(`{ +"f:metadata":{ + "f:ownerReferences":{ + "k:{\"uid\":\"%s\"}":{} + } +}}`, machinesMap[im.GetName()].UID), + }}))) } - return len(machines.Items) - }, timeout).Should(BeEquivalentTo(replicas)) - for _, im := range infraMachines.Items { - g.Expect(im.GetAnnotations()).To(HaveKeyWithValue("annotation-1", "true"), "have annotations of MachineTemplate applied") - g.Expect(im.GetAnnotations()).To(HaveKeyWithValue("precedence", "MachineSet"), "the annotations from the MachineSpec template to overwrite the infrastructure template ones") - g.Expect(im.GetLabels()).To(HaveKeyWithValue("label-1", "true"), "have labels of MachineTemplate applied") - } + }, timeout).Should(Succeed()) + g.Eventually(func() bool { g.Expect(env.List(ctx, infraMachines, client.InNamespace(namespace.Name))).To(Succeed()) // The Machine reconciler should remove the ownerReference to the MachineSet on the InfrastructureMachine. 
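The assertions above encode the new ownership model that this patch introduces: capi-machineset-metadata owns labels and annotations, capi-machineset owns the rest of the applied object, and the classic "manager" keeps only what it writes itself (for example the ownerReference). The following is a minimal, illustrative sketch of that split-manager pattern using controller-runtime's server-side apply options; it is not taken from this change, and the package, helper, and manager names are assumptions for illustration only.

package ssaexample

import (
	"context"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// applyWithSplitOwnership sketches the two-manager split: the primary manager applies the object
// without labels/annotations, and a dedicated metadata manager applies only labels/annotations.
// In the actual controllers the first apply contains the full object and label/annotation
// ownership is stripped afterwards (ssa.RemoveManagedFieldsForLabelsAndAnnotations); this sketch
// only illustrates the resulting ownership layout.
// obj is assumed to be the locally computed desired state (not read back from the API server).
func applyWithSplitOwnership(ctx context.Context, c client.Client, obj *unstructured.Unstructured) error {
	// Primary intent: everything except labels and annotations, owned by the primary manager.
	primary := obj.DeepCopy()
	primary.SetLabels(nil)
	primary.SetAnnotations(nil)
	if err := c.Patch(ctx, primary, client.Apply,
		client.FieldOwner("example-manager"), client.ForceOwnership); err != nil {
		return err
	}

	// Metadata-only intent: same GVK/namespace/name, only labels and annotations,
	// owned by the metadata manager.
	meta := &unstructured.Unstructured{}
	meta.SetGroupVersionKind(obj.GroupVersionKind())
	meta.SetNamespace(obj.GetNamespace())
	meta.SetName(obj.GetName())
	meta.SetLabels(obj.GetLabels())
	meta.SetAnnotations(obj.GetAnnotations())
	return c.Patch(ctx, meta, client.Apply,
		client.FieldOwner("example-manager-metadata"), client.ForceOwnership)
}

With this layout either manager can later drop or change only the fields it owns, which is exactly what the managedFields assertions above verify for capi-machineset-metadata and capi-machineset.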
@@ -327,21 +383,60 @@ func TestMachineSetReconciler(t *testing.T) { return !hasMSOwnerRef && hasMachineOwnerRef }, timeout).Should(BeTrue(), "infraMachine should not have ownerRef to MachineSet") - t.Log("Creating a BootstrapConfig for each Machine") + t.Log("Verify BootstrapConfig for each Machine") bootstrapConfigs := &unstructured.UnstructuredList{} bootstrapConfigs.SetAPIVersion(clusterv1.GroupVersionBootstrap.String()) bootstrapConfigs.SetKind("GenericBootstrapConfig") - g.Eventually(func() int { - if err := env.List(ctx, bootstrapConfigs, client.InNamespace(namespace.Name)); err != nil { - return -1 + g.Eventually(func(g Gomega) { + g.Expect(env.List(ctx, bootstrapConfigs, client.InNamespace(namespace.Name))).To(Succeed()) + g.Expect(bootstrapConfigs.Items).To(HaveLen(int(replicas))) + for _, im := range bootstrapConfigs.Items { + g.Expect(im.GetAnnotations()).To(HaveKeyWithValue("annotation-1", "true"), "have annotations of MachineTemplate applied") + g.Expect(im.GetAnnotations()).To(HaveKeyWithValue("precedence", "MachineSet"), "the annotations from the MachineSpec template to overwrite the bootstrap config template ones") + g.Expect(im.GetLabels()).To(HaveKeyWithValue("label-1", "true"), "have labels of MachineTemplate applied") + g.Expect(im.GetLabels()).To(HaveKeyWithValue("precedence", "MachineSet"), "the labels from the MachineSpec template to overwrite the bootstrap config template ones") + g.Expect(cleanupTime(im.GetManagedFields())).To(ConsistOf(toManagedFields([]managedFieldEntry{{ + // capi-machineset-metadata owns labels and annotations. + APIVersion: im.GetAPIVersion(), + Manager: machineSetMetadataManagerName, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:annotations":{ + "f:annotation-1":{}, + "f:precedence":{} + }, + "f:labels":{ + "f:cluster.x-k8s.io/cluster-name":{}, + "f:cluster.x-k8s.io/deployment-name":{}, + "f:cluster.x-k8s.io/set-name":{}, + "f:label-1":{}, + "f:precedence":{} + } +}}`, + }, { + // capi-machineset owns spec. + APIVersion: im.GetAPIVersion(), + Manager: machineSetManagerName, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:spec":{ + "f:format":{} +}}`, + }, { + // Machine manager owns ownerReference. + APIVersion: im.GetAPIVersion(), + Manager: "manager", + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsV1: fmt.Sprintf(`{ +"f:metadata":{ + "f:ownerReferences":{ + "k:{\"uid\":\"%s\"}":{} + } +}}`, machinesMap[im.GetName()].UID), + }}))) } - return len(machines.Items) - }, timeout).Should(BeEquivalentTo(replicas)) - for _, im := range bootstrapConfigs.Items { - g.Expect(im.GetAnnotations()).To(HaveKeyWithValue("annotation-1", "true"), "have annotations of MachineTemplate applied") - g.Expect(im.GetAnnotations()).To(HaveKeyWithValue("precedence", "MachineSet"), "the annotations from the MachineSpec template to overwrite the bootstrap config template ones") - g.Expect(im.GetLabels()).To(HaveKeyWithValue("label-1", "true"), "have labels of MachineTemplate applied") - } + }, timeout).Should(Succeed()) g.Eventually(func() bool { g.Expect(env.List(ctx, bootstrapConfigs, client.InNamespace(namespace.Name))).To(Succeed()) // The Machine reconciler should remove the ownerReference to the MachineSet on the Bootstrap object. 
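For readers decoding the raw FieldsV1 payloads asserted above: each managedFields entry stores a JSON tree whose "f:"-prefixed keys mirror the fields owned by that manager, so the test literally spells out which manager owns metadata.labels, metadata.annotations, and spec. The helper below is an illustrative sketch of how such a payload can be inspected programmatically; it is not part of this patch, and the package and function names are assumptions.

package ssaexample

import (
	"encoding/json"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ownsLabels reports whether the Apply entry of the given manager (main resource, no subresource)
// claims ownership of any field under metadata.labels.
func ownsLabels(entries []metav1.ManagedFieldsEntry, manager string) (bool, error) {
	for _, e := range entries {
		if e.Manager != manager || e.Operation != metav1.ManagedFieldsOperationApply || e.Subresource != "" {
			continue
		}
		if e.FieldsV1 == nil {
			continue
		}
		// Decode the FieldsV1 payload into a generic map and walk f:metadata -> f:labels.
		fields := map[string]interface{}{}
		if err := json.Unmarshal(e.FieldsV1.Raw, &fields); err != nil {
			return false, err
		}
		meta, ok := fields["f:metadata"].(map[string]interface{})
		if !ok {
			continue
		}
		labels, ok := meta["f:labels"].(map[string]interface{})
		if !ok {
			continue
		}
		if len(labels) > 0 {
			return true, nil
		}
	}
	return false, nil
}

This is essentially the same shape of check that needsMigration performs further down in internal/util/ssa/managedfields.go when it looks for the cluster-name label in the fieldManager's Apply entry.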
@@ -1130,14 +1225,11 @@ func TestMachineSetReconciler_syncMachines(t *testing.T) { namespace, testCluster := setup(t, g) defer teardown(t, g, namespace, testCluster) - classicManager := "manager" replicas := int32(2) version := "v1.25.3" duration10s := ptr.To(int32(10)) - duration11s := ptr.To(int32(11)) ms := &clusterv1.MachineSet{ ObjectMeta: metav1.ObjectMeta{ - UID: "abc-123-ms-uid", Name: "ms-1", Namespace: namespace.Name, Labels: map[string]string{ @@ -1196,12 +1288,12 @@ func TestMachineSetReconciler_syncMachines(t *testing.T) { "metadata": map[string]interface{}{ "name": "infra-machine-1", "namespace": namespace.Name, - "labels": map[string]string{ + "labels": map[string]interface{}{ "preserved-label": "preserved-value", "dropped-label": "dropped-value", "modified-label": "modified-value", }, - "annotations": map[string]string{ + "annotations": map[string]interface{}{ "preserved-annotation": "preserved-value", "dropped-annotation": "dropped-value", "modified-annotation": "modified-value", @@ -1210,7 +1302,6 @@ func TestMachineSetReconciler_syncMachines(t *testing.T) { "spec": infraMachineSpec, }, } - g.Expect(env.Create(ctx, infraMachine, client.FieldOwner(classicManager))).To(Succeed()) bootstrapConfigSpec := map[string]interface{}{ "bootstrap-field": "bootstrap-value", @@ -1222,12 +1313,12 @@ func TestMachineSetReconciler_syncMachines(t *testing.T) { "metadata": map[string]interface{}{ "name": "bootstrap-config-1", "namespace": namespace.Name, - "labels": map[string]string{ + "labels": map[string]interface{}{ "preserved-label": "preserved-value", "dropped-label": "dropped-value", "modified-label": "modified-value", }, - "annotations": map[string]string{ + "annotations": map[string]interface{}{ "preserved-annotation": "preserved-value", "dropped-annotation": "dropped-value", "modified-annotation": "modified-value", @@ -1236,7 +1327,6 @@ func TestMachineSetReconciler_syncMachines(t *testing.T) { "spec": bootstrapConfigSpec, }, } - g.Expect(env.Create(ctx, bootstrapConfig, client.FieldOwner(classicManager))).To(Succeed()) inPlaceMutatingMachine := &clusterv1.Machine{ TypeMeta: metav1.TypeMeta{ @@ -1273,7 +1363,6 @@ func TestMachineSetReconciler_syncMachines(t *testing.T) { }, }, } - g.Expect(env.PatchAndWait(ctx, inPlaceMutatingMachine, client.FieldOwner(machineSetManagerName))).To(Succeed()) deletingMachine := &clusterv1.Machine{ TypeMeta: metav1.TypeMeta{ @@ -1299,7 +1388,25 @@ func TestMachineSetReconciler_syncMachines(t *testing.T) { }, }, } - g.Expect(env.PatchAndWait(ctx, deletingMachine, client.FieldOwner(machineSetManagerName))).To(Succeed()) + + // + // Create objects + // + + // Create MachineSet + g.Expect(env.CreateAndWait(ctx, ms)).To(Succeed()) + + // Create InfraMachine (same as in createInfraMachine) + g.Expect(ssa.Patch(ctx, env.Client, machineSetManagerName, infraMachine)).To(Succeed()) + g.Expect(ssa.RemoveManagedFieldsForLabelsAndAnnotations(ctx, env.Client, env.GetAPIReader(), infraMachine, machineSetManagerName)).To(Succeed()) + + // Create BootstrapConfig (same as in createBootstrapConfig) + g.Expect(ssa.Patch(ctx, env.Client, machineSetManagerName, bootstrapConfig)).To(Succeed()) + g.Expect(ssa.RemoveManagedFieldsForLabelsAndAnnotations(ctx, env.Client, env.GetAPIReader(), bootstrapConfig, machineSetManagerName)).To(Succeed()) + + // Create Machines (same as in syncReplicas) + g.Expect(ssa.Patch(ctx, env.Client, machineSetManagerName, inPlaceMutatingMachine)).To(Succeed()) + g.Expect(ssa.Patch(ctx, env.Client, machineSetManagerName, 
deletingMachine)).To(Succeed()) // Delete the machine to put it in the deleting state g.Expect(env.Delete(ctx, deletingMachine)).To(Succeed()) // Wait till the machine is marked for deletion @@ -1319,7 +1426,9 @@ func TestMachineSetReconciler_syncMachines(t *testing.T) { // Run syncMachines to clean up managed fields and have proper field ownership // for Machines, InfrastructureMachines and BootstrapConfigs. reconciler := &Reconciler{ - Client: env, + // Note: Ensure the fieldManager defaults to manager like in prod. + // Otherwise it defaults to the binary name which is not manager in tests. + Client: client.WithFieldOwner(env.Client, "manager"), ssaCache: ssa.NewCache("test-controller"), } s := &scope{ @@ -1330,58 +1439,87 @@ func TestMachineSetReconciler_syncMachines(t *testing.T) { _, err := reconciler.syncMachines(ctx, s) g.Expect(err).ToNot(HaveOccurred()) - // The inPlaceMutatingMachine should have cleaned up managed fields. updatedInPlaceMutatingMachine := inPlaceMutatingMachine.DeepCopy() - g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInPlaceMutatingMachine), updatedInPlaceMutatingMachine)).To(Succeed()) - // Verify ManagedFields - g.Expect(updatedInPlaceMutatingMachine.ManagedFields).Should( - ContainElement(ssa.MatchManagedFieldsEntry(machineSetManagerName, metav1.ManagedFieldsOperationApply)), - "in-place mutable machine should contain an entry for SSA manager", - ) - g.Expect(updatedInPlaceMutatingMachine.ManagedFields).ShouldNot( - ContainElement(ssa.MatchManagedFieldsEntry(classicManager, metav1.ManagedFieldsOperationUpdate)), - "in-place mutable machine should not contain an entry for old manager", - ) - - // The InfrastructureMachine should have ownership of "labels" and "annotations" transferred to - // "capi-machineset" manager. + g.Eventually(func(g Gomega) { + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInPlaceMutatingMachine), updatedInPlaceMutatingMachine)).To(Succeed()) + statusManagedFields := managedFieldsMatching(updatedInPlaceMutatingMachine.ManagedFields, "manager", metav1.ManagedFieldsOperationUpdate, "status") + g.Expect(statusManagedFields).To(HaveLen(1)) + g.Expect(cleanupTime(updatedInPlaceMutatingMachine.ManagedFields)).To(ConsistOf(toManagedFields([]managedFieldEntry{{ + // capi-machineset owns almost everything. + Manager: machineSetManagerName, + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: clusterv1.GroupVersion.String(), + FieldsV1: fmt.Sprintf("{\"f:metadata\":{\"f:annotations\":{\"f:dropped-annotation\":{},\"f:modified-annotation\":{},\"f:preserved-annotation\":{}},\"f:finalizers\":{\"v:\\\"machine.cluster.x-k8s.io\\\"\":{}},\"f:labels\":{\"f:cluster.x-k8s.io/deployment-name\":{},\"f:cluster.x-k8s.io/set-name\":{},\"f:dropped-label\":{},\"f:modified-label\":{},\"f:preserved-label\":{}},\"f:ownerReferences\":{\"k:{\\\"uid\\\":\\\"%s\\\"}\":{}}},\"f:spec\":{\"f:bootstrap\":{\"f:configRef\":{\"f:apiGroup\":{},\"f:kind\":{},\"f:name\":{}}},\"f:clusterName\":{},\"f:infrastructureRef\":{\"f:apiGroup\":{},\"f:kind\":{},\"f:name\":{}},\"f:version\":{}}}", ms.UID), + }, { + // manager owns the finalizer. + Manager: "manager", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: clusterv1.GroupVersion.String(), + FieldsV1: "{\"f:metadata\":{\"f:finalizers\":{\".\":{},\"v:\\\"machine.cluster.x-k8s.io\\\"\":{}}}}", + }, { + // manager owns status. 
+ Manager: "manager", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: clusterv1.GroupVersion.String(), + // No need to verify details (a bit non-deterministic, as it depends on Machine controller reconciles). + FieldsV1: string(statusManagedFields[0].FieldsV1.Raw), + Subresource: "status", + }}))) + }, timeout).Should(Succeed()) + updatedInfraMachine := infraMachine.DeepCopy() - g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInfraMachine), updatedInfraMachine)).To(Succeed()) + g.Eventually(func(g Gomega) { + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInfraMachine), updatedInfraMachine)).To(Succeed()) + g.Expect(cleanupTime(updatedInfraMachine.GetManagedFields())).To(ConsistOf(toManagedFields([]managedFieldEntry{{ + // capi-machineset-metadata owns labels and annotations. + Manager: machineSetMetadataManagerName, + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: updatedInfraMachine.GetAPIVersion(), + FieldsV1: "{\"f:metadata\":{\"f:annotations\":{\"f:dropped-annotation\":{},\"f:modified-annotation\":{},\"f:preserved-annotation\":{}},\"f:labels\":{\"f:cluster.x-k8s.io/deployment-name\":{},\"f:cluster.x-k8s.io/set-name\":{},\"f:dropped-label\":{},\"f:modified-label\":{},\"f:preserved-label\":{}}}}", + }, { + // capi-machineset owns spec. + Manager: machineSetManagerName, + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: updatedInfraMachine.GetAPIVersion(), + FieldsV1: "{\"f:spec\":{\"f:infra-field\":{}}}", + }, { + // manager owns ClusterName label and ownerReferences. + Manager: "manager", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: updatedInfraMachine.GetAPIVersion(), + FieldsV1: fmt.Sprintf("{\"f:metadata\":{\"f:labels\":{\"f:cluster.x-k8s.io/cluster-name\":{}},\"f:ownerReferences\":{\".\":{},\"k:{\\\"uid\\\":\\\"%s\\\"}\":{}}}}", inPlaceMutatingMachine.UID), + }}))) + }, timeout).Should(Succeed()) - // Verify ManagedFields - g.Expect(updatedInfraMachine.GetManagedFields()).Should( - ssa.MatchFieldOwnership(machineSetManagerName, metav1.ManagedFieldsOperationApply, contract.Path{"f:metadata", "f:labels"})) - g.Expect(updatedInfraMachine.GetManagedFields()).Should( - ssa.MatchFieldOwnership(machineSetManagerName, metav1.ManagedFieldsOperationApply, contract.Path{"f:metadata", "f:annotations"})) - g.Expect(updatedInfraMachine.GetManagedFields()).ShouldNot( - ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:metadata", "f:labels"})) - g.Expect(updatedInfraMachine.GetManagedFields()).ShouldNot( - ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:metadata", "f:annotations"})) - g.Expect(updatedInfraMachine.GetManagedFields()).Should( - ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:spec"})) - - // The BootstrapConfig should have ownership of "labels" and "annotations" transferred to - // "capi-machineset" manager. 
updatedBootstrapConfig := bootstrapConfig.DeepCopy() - g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedBootstrapConfig), updatedBootstrapConfig)).To(Succeed()) - - // Verify ManagedFields - g.Expect(updatedBootstrapConfig.GetManagedFields()).Should( - ssa.MatchFieldOwnership(machineSetManagerName, metav1.ManagedFieldsOperationApply, contract.Path{"f:metadata", "f:labels"})) - g.Expect(updatedBootstrapConfig.GetManagedFields()).Should( - ssa.MatchFieldOwnership(machineSetManagerName, metav1.ManagedFieldsOperationApply, contract.Path{"f:metadata", "f:annotations"})) - g.Expect(updatedBootstrapConfig.GetManagedFields()).ShouldNot( - ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:metadata", "f:labels"})) - g.Expect(updatedBootstrapConfig.GetManagedFields()).ShouldNot( - ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:metadata", "f:annotations"})) - g.Expect(updatedBootstrapConfig.GetManagedFields()).Should( - ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:spec"})) + g.Eventually(func(g Gomega) { + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedBootstrapConfig), updatedBootstrapConfig)).To(Succeed()) + g.Expect(cleanupTime(updatedBootstrapConfig.GetManagedFields())).To(ConsistOf(toManagedFields([]managedFieldEntry{{ + // capi-machineset-metadata owns labels and annotations. + Manager: machineSetMetadataManagerName, + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: updatedBootstrapConfig.GetAPIVersion(), + FieldsV1: "{\"f:metadata\":{\"f:annotations\":{\"f:dropped-annotation\":{},\"f:modified-annotation\":{},\"f:preserved-annotation\":{}},\"f:labels\":{\"f:cluster.x-k8s.io/deployment-name\":{},\"f:cluster.x-k8s.io/set-name\":{},\"f:dropped-label\":{},\"f:modified-label\":{},\"f:preserved-label\":{}}}}", + }, { + // capi-machineset owns spec. + Manager: machineSetManagerName, + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: updatedBootstrapConfig.GetAPIVersion(), + FieldsV1: "{\"f:spec\":{\"f:bootstrap-field\":{}}}", + }, { + // manager owns ClusterName label and ownerReferences. + Manager: "manager", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: updatedBootstrapConfig.GetAPIVersion(), + FieldsV1: fmt.Sprintf("{\"f:metadata\":{\"f:labels\":{\"f:cluster.x-k8s.io/cluster-name\":{}},\"f:ownerReferences\":{\".\":{},\"k:{\\\"uid\\\":\\\"%s\\\"}\":{}}}}", inPlaceMutatingMachine.UID), + }}))) + }, timeout).Should(Succeed()) // // Verify In-place mutating fields // - // Update the MachineSet and verify the in-mutating fields are propagated. + // Update the MachineSet and verify the in-place mutating fields are propagated. ms.Spec.Template.Labels = map[string]string{ "preserved-label": "preserved-value", // Keep the label and value as is "modified-label": "modified-value-2", // Modify the value of the label @@ -1415,115 +1553,77 @@ func TestMachineSetReconciler_syncMachines(t *testing.T) { // Verify in-place mutable fields are updated on the Machine. 
updatedInPlaceMutatingMachine = inPlaceMutatingMachine.DeepCopy() - g.Eventually(func(g Gomega) { - g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInPlaceMutatingMachine), updatedInPlaceMutatingMachine)).To(Succeed()) - // Verify Labels - g.Expect(updatedInPlaceMutatingMachine.Labels).Should(Equal(expectedLabels)) - // Verify Annotations - g.Expect(updatedInPlaceMutatingMachine.Annotations).Should(Equal(ms.Spec.Template.Annotations)) - // Verify Node timeout values - g.Expect(updatedInPlaceMutatingMachine.Spec.Deletion.NodeDrainTimeoutSeconds).Should(And( - Not(BeNil()), - HaveValue(Equal(*ms.Spec.Template.Spec.Deletion.NodeDrainTimeoutSeconds)), - )) - g.Expect(updatedInPlaceMutatingMachine.Spec.Deletion.NodeDeletionTimeoutSeconds).Should(And( - Not(BeNil()), - HaveValue(Equal(*ms.Spec.Template.Spec.Deletion.NodeDeletionTimeoutSeconds)), - )) - g.Expect(updatedInPlaceMutatingMachine.Spec.Deletion.NodeVolumeDetachTimeoutSeconds).Should(And( - Not(BeNil()), - HaveValue(Equal(*ms.Spec.Template.Spec.Deletion.NodeVolumeDetachTimeoutSeconds)), - )) - g.Expect(updatedInPlaceMutatingMachine.Spec.MinReadySeconds).Should(And( - Not(BeNil()), - HaveValue(Equal(*ms.Spec.Template.Spec.MinReadySeconds)), - )) - // Verify readiness gates. - g.Expect(updatedInPlaceMutatingMachine.Spec.ReadinessGates).Should(Equal(readinessGates)) - }, timeout).Should(Succeed()) + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInPlaceMutatingMachine), updatedInPlaceMutatingMachine)).To(Succeed()) + g.Expect(updatedInPlaceMutatingMachine.Labels).Should(Equal(expectedLabels)) + g.Expect(updatedInPlaceMutatingMachine.Annotations).Should(Equal(ms.Spec.Template.Annotations)) + g.Expect(updatedInPlaceMutatingMachine.Spec.Deletion.NodeDrainTimeoutSeconds).Should(Equal(ms.Spec.Template.Spec.Deletion.NodeDrainTimeoutSeconds)) + g.Expect(updatedInPlaceMutatingMachine.Spec.Deletion.NodeDeletionTimeoutSeconds).Should(Equal(ms.Spec.Template.Spec.Deletion.NodeDeletionTimeoutSeconds)) + g.Expect(updatedInPlaceMutatingMachine.Spec.Deletion.NodeVolumeDetachTimeoutSeconds).Should(Equal(ms.Spec.Template.Spec.Deletion.NodeVolumeDetachTimeoutSeconds)) + g.Expect(updatedInPlaceMutatingMachine.Spec.MinReadySeconds).Should(Equal(ms.Spec.Template.Spec.MinReadySeconds)) + g.Expect(updatedInPlaceMutatingMachine.Spec.ReadinessGates).Should(Equal(readinessGates)) // Verify in-place mutable fields are updated on InfrastructureMachine updatedInfraMachine = infraMachine.DeepCopy() - g.Eventually(func(g Gomega) { - g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInfraMachine), updatedInfraMachine)).To(Succeed()) - // Verify Labels - g.Expect(updatedInfraMachine.GetLabels()).Should(Equal(expectedLabels)) - // Verify Annotations - g.Expect(updatedInfraMachine.GetAnnotations()).Should(Equal(ms.Spec.Template.Annotations)) - // Verify spec remains the same - g.Expect(updatedInfraMachine.Object).Should(HaveKeyWithValue("spec", infraMachineSpec)) - }, timeout).Should(Succeed()) + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInfraMachine), updatedInfraMachine)).To(Succeed()) + g.Expect(updatedInfraMachine.GetLabels()).Should(Equal(expectedLabels)) + g.Expect(updatedInfraMachine.GetAnnotations()).Should(Equal(ms.Spec.Template.Annotations)) + // Verify spec remains the same + g.Expect(updatedInfraMachine.Object).Should(HaveKeyWithValue("spec", infraMachineSpec)) // Verify in-place mutable fields are updated on the BootstrapConfig. 
updatedBootstrapConfig = bootstrapConfig.DeepCopy() - g.Eventually(func(g Gomega) { - g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedBootstrapConfig), updatedBootstrapConfig)).To(Succeed()) - // Verify Labels - g.Expect(updatedBootstrapConfig.GetLabels()).Should(Equal(expectedLabels)) - // Verify Annotations - g.Expect(updatedBootstrapConfig.GetAnnotations()).Should(Equal(ms.Spec.Template.Annotations)) - // Verify spec remains the same - g.Expect(updatedBootstrapConfig.Object).Should(HaveKeyWithValue("spec", bootstrapConfigSpec)) - }, timeout).Should(Succeed()) + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedBootstrapConfig), updatedBootstrapConfig)).To(Succeed()) + g.Expect(updatedBootstrapConfig.GetLabels()).Should(Equal(expectedLabels)) + g.Expect(updatedBootstrapConfig.GetAnnotations()).Should(Equal(ms.Spec.Template.Annotations)) + // Verify spec remains the same + g.Expect(updatedBootstrapConfig.Object).Should(HaveKeyWithValue("spec", bootstrapConfigSpec)) - // Wait to ensure Machine is not updated. - // Verify that the machine stays the same consistently. - g.Consistently(func(g Gomega) { - // The deleting machine should not change. + // Verify ManagedFields + g.Eventually(func(g Gomega) { updatedDeletingMachine := deletingMachine.DeepCopy() g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedDeletingMachine), updatedDeletingMachine)).To(Succeed()) + statusManagedFields := managedFieldsMatching(updatedInPlaceMutatingMachine.ManagedFields, "manager", metav1.ManagedFieldsOperationUpdate, "status") + g.Expect(statusManagedFields).To(HaveLen(1)) + g.Expect(cleanupTime(updatedDeletingMachine.ManagedFields)).To(ConsistOf(toManagedFields([]managedFieldEntry{{ + // capi-machineset owns almost everything. + Manager: machineSetManagerName, + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: clusterv1.GroupVersion.String(), + FieldsV1: "{\"f:metadata\":{\"f:finalizers\":{\"v:\\\"testing-finalizer\\\"\":{}}},\"f:spec\":{\"f:bootstrap\":{\"f:dataSecretName\":{}},\"f:clusterName\":{},\"f:infrastructureRef\":{\"f:apiGroup\":{},\"f:kind\":{},\"f:name\":{}}}}", + }, { + // manager owns the fields that are propagated in-place for deleting Machines in syncMachines via patchHelper. + Manager: "manager", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: clusterv1.GroupVersion.String(), + FieldsV1: fmt.Sprintf("{\"f:metadata\":{\"f:ownerReferences\":{\".\":{},\"k:{\\\"uid\\\":\\\"%s\\\"}\":{}}},\"f:spec\":{\"f:deletion\":{\"f:nodeDrainTimeoutSeconds\":{},\"f:nodeVolumeDetachTimeoutSeconds\":{}},\"f:minReadySeconds\":{},\"f:readinessGates\":{\".\":{},\"k:{\\\"conditionType\\\":\\\"foo\\\"}\":{\".\":{},\"f:conditionType\":{}}}}}", testCluster.UID), + }, { + // manager owns status. + Manager: "manager", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: clusterv1.GroupVersion.String(), + // No need to verify details (a bit non-deterministic, as it depends on Machine controller reconciles). 
+ FieldsV1: string(statusManagedFields[0].FieldsV1.Raw), + Subresource: "status", + }}))) + }, timeout).Should(Succeed()) - // Verify ManagedFields - g.Expect(updatedDeletingMachine.ManagedFields).Should( - ContainElement(ssa.MatchManagedFieldsEntry(machineSetManagerName, metav1.ManagedFieldsOperationApply)), - "deleting machine should contain an entry for SSA manager", - ) - g.Expect(updatedDeletingMachine.ManagedFields).ShouldNot( - ContainElement(ssa.MatchManagedFieldsEntry(classicManager, metav1.ManagedFieldsOperationUpdate)), - "deleting machine should not contain an entry for old manager", - ) - - // Verify in-place mutable fields are still the same. - g.Expect(updatedDeletingMachine.Labels).Should(Equal(deletingMachine.Labels)) - g.Expect(updatedDeletingMachine.Annotations).Should(Equal(deletingMachine.Annotations)) - g.Expect(updatedDeletingMachine.Spec.Deletion.NodeDrainTimeoutSeconds).Should(Equal(deletingMachine.Spec.Deletion.NodeDrainTimeoutSeconds)) - g.Expect(updatedDeletingMachine.Spec.Deletion.NodeDeletionTimeoutSeconds).Should(Equal(deletingMachine.Spec.Deletion.NodeDeletionTimeoutSeconds)) - g.Expect(updatedDeletingMachine.Spec.Deletion.NodeVolumeDetachTimeoutSeconds).Should(Equal(deletingMachine.Spec.Deletion.NodeVolumeDetachTimeoutSeconds)) - g.Expect(updatedDeletingMachine.Spec.MinReadySeconds).Should(Equal(deletingMachine.Spec.MinReadySeconds)) - }, 5*time.Second).Should(Succeed()) - - // Verify in-place mutable fields are updated on the deleting machine - ms.Spec.Template.Spec.Deletion.NodeDrainTimeoutSeconds = duration11s - ms.Spec.Template.Spec.Deletion.NodeDeletionTimeoutSeconds = duration11s - ms.Spec.Template.Spec.Deletion.NodeVolumeDetachTimeoutSeconds = duration11s - ms.Spec.Template.Spec.MinReadySeconds = ptr.To[int32](11) - s = &scope{ - machineSet: ms, - machines: []*clusterv1.Machine{updatedInPlaceMutatingMachine, deletingMachine}, - getAndAdoptMachinesForMachineSetSucceeded: true, - } - _, err = reconciler.syncMachines(ctx, s) - g.Expect(err).ToNot(HaveOccurred()) + // Verify in-place mutable fields are updated on the deleting Machine. 
updatedDeletingMachine := deletingMachine.DeepCopy() - g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedDeletingMachine), updatedDeletingMachine)).To(Succeed()) - // Verify Node timeout values - g.Expect(updatedDeletingMachine.Spec.Deletion.NodeDrainTimeoutSeconds).Should(And( - Not(BeNil()), - HaveValue(Equal(*ms.Spec.Template.Spec.Deletion.NodeDrainTimeoutSeconds)), - )) - g.Expect(updatedDeletingMachine.Spec.Deletion.NodeDeletionTimeoutSeconds).Should(And( - Not(BeNil()), - HaveValue(Equal(*ms.Spec.Template.Spec.Deletion.NodeDeletionTimeoutSeconds)), - )) - g.Expect(updatedDeletingMachine.Spec.Deletion.NodeVolumeDetachTimeoutSeconds).Should(And( - Not(BeNil()), - HaveValue(Equal(*ms.Spec.Template.Spec.Deletion.NodeVolumeDetachTimeoutSeconds)), - )) - g.Expect(updatedDeletingMachine.Spec.MinReadySeconds).Should(And( - Not(BeNil()), - HaveValue(Equal(*ms.Spec.Template.Spec.MinReadySeconds)), - )) + g.Expect(updatedDeletingMachine.Labels).Should(Equal(deletingMachine.Labels)) // Not propagated to a deleting Machine + g.Expect(updatedDeletingMachine.Annotations).Should(Equal(deletingMachine.Annotations)) // Not propagated to a deleting Machine + g.Expect(updatedDeletingMachine.Spec.Deletion.NodeDrainTimeoutSeconds).Should(Equal(ms.Spec.Template.Spec.Deletion.NodeDrainTimeoutSeconds)) + g.Expect(updatedDeletingMachine.Spec.Deletion.NodeDeletionTimeoutSeconds).Should(Equal(ms.Spec.Template.Spec.Deletion.NodeDeletionTimeoutSeconds)) + g.Expect(updatedDeletingMachine.Spec.Deletion.NodeVolumeDetachTimeoutSeconds).Should(Equal(ms.Spec.Template.Spec.Deletion.NodeVolumeDetachTimeoutSeconds)) + g.Expect(updatedDeletingMachine.Spec.MinReadySeconds).Should(Equal(ms.Spec.Template.Spec.MinReadySeconds)) + g.Expect(updatedDeletingMachine.Spec.ReadinessGates).Should(Equal(readinessGates)) + // Verify the machine spec is otherwise unchanged. 
+ deletingMachine.Spec.Deletion.NodeDrainTimeoutSeconds = ms.Spec.Template.Spec.Deletion.NodeDrainTimeoutSeconds + deletingMachine.Spec.Deletion.NodeDeletionTimeoutSeconds = ms.Spec.Template.Spec.Deletion.NodeDeletionTimeoutSeconds + deletingMachine.Spec.Deletion.NodeVolumeDetachTimeoutSeconds = ms.Spec.Template.Spec.Deletion.NodeVolumeDetachTimeoutSeconds + deletingMachine.Spec.MinReadySeconds = ms.Spec.Template.Spec.MinReadySeconds + deletingMachine.Spec.ReadinessGates = ms.Spec.Template.Spec.ReadinessGates + g.Expect(updatedDeletingMachine.Spec).Should(BeComparableTo(deletingMachine.Spec)) } func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { @@ -2287,17 +2387,24 @@ func TestMachineSetReconciler_syncReplicas_WithErrors(t *testing.T) { g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed()) fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(builder.GenericBootstrapConfigTemplateCRD, builder.GenericInfrastructureMachineTemplateCRD).WithInterceptorFuncs(interceptor.Funcs{ - Create: func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.CreateOption) error { + Apply: func(ctx context.Context, c client.WithWatch, obj runtime.ApplyConfiguration, opts ...client.ApplyOption) error { // simulate scenarios where infra object creation fails - if obj.GetObjectKind().GroupVersionKind().Kind == "GenericInfrastructureMachine" { + clientObject, ok := obj.(client.Object) + if !ok { + return errors.Errorf("error during object creation: unexpected ApplyConfiguration") + } + if clientObject.GetObjectKind().GroupVersionKind().Kind == "GenericInfrastructureMachine" { return fmt.Errorf("inject error for create machineInfrastructure") } - return client.Create(ctx, obj, opts...) + return c.Apply(ctx, obj, opts...) }, }).Build() r := &Reconciler{ Client: fakeClient, + // Note: This field is only used for unit tests that use fake client because the fake client does not properly set resourceVersion + // on BootstrapConfig/InfraMachine after ssa.Patch and then ssa.RemoveManagedFieldsForLabelsAndAnnotations would fail. 
+ disableRemoveManagedFieldsForLabelsAndAnnotations: true, } testCluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ @@ -3164,3 +3271,49 @@ func TestSortMachinesToRemediate(t *testing.T) { g.Expect(machines).To(Equal(append(unhealthyMachinesWithAnnotations, unhealthyMachines...))) }) } + +func cleanupTime(fields []metav1.ManagedFieldsEntry) []metav1.ManagedFieldsEntry { + for i := range fields { + fields[i].Time = nil + } + return fields +} + +type managedFieldEntry struct { + Manager string + Operation metav1.ManagedFieldsOperationType + APIVersion string + FieldsV1 string + Subresource string +} + +func toManagedFields(managedFields []managedFieldEntry) []metav1.ManagedFieldsEntry { + res := []metav1.ManagedFieldsEntry{} + for _, f := range managedFields { + res = append(res, metav1.ManagedFieldsEntry{ + Manager: f.Manager, + Operation: f.Operation, + APIVersion: f.APIVersion, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(trimSpaces(f.FieldsV1))}, + Subresource: f.Subresource, + }) + } + return res +} + +func trimSpaces(s string) string { + s = strings.ReplaceAll(s, "\n", "") + s = strings.ReplaceAll(s, "\t", "") + return s +} + +func managedFieldsMatching(managedFields []metav1.ManagedFieldsEntry, manager string, operation metav1.ManagedFieldsOperationType, subresource string) []metav1.ManagedFieldsEntry { + res := []metav1.ManagedFieldsEntry{} + for _, f := range managedFields { + if f.Manager == manager && f.Operation == operation && f.Subresource == subresource { + res = append(res, f) + } + } + return res +} diff --git a/internal/controllers/machineset/suite_test.go b/internal/controllers/machineset/suite_test.go index c8122b40b50e..d29a5a26d2fd 100644 --- a/internal/controllers/machineset/suite_test.go +++ b/internal/controllers/machineset/suite_test.go @@ -96,14 +96,18 @@ func TestMain(m *testing.M) { }() if err := (&Reconciler{ - Client: mgr.GetClient(), + // Note: Ensure the fieldManager defaults to manager like in prod. + // Otherwise it defaults to the binary name which is not manager in tests. + Client: client.WithFieldOwner(mgr.GetClient(), "manager"), APIReader: mgr.GetAPIReader(), ClusterCache: clusterCache, }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: 1}); err != nil { panic(fmt.Sprintf("Failed to start MMachineSetReconciler: %v", err)) } if err := (&machinecontroller.Reconciler{ - Client: mgr.GetClient(), + // Note: Ensure the fieldManager defaults to manager like in prod. + // Otherwise it defaults to the binary name which is not manager in tests. + Client: client.WithFieldOwner(mgr.GetClient(), "manager"), APIReader: mgr.GetAPIReader(), ClusterCache: clusterCache, RemoteConditionsGracePeriod: 5 * time.Minute, diff --git a/internal/util/ssa/managedfields.go b/internal/util/ssa/managedfields.go index d3725e53894b..b41a04620f43 100644 --- a/internal/util/ssa/managedfields.go +++ b/internal/util/ssa/managedfields.go @@ -18,88 +18,213 @@ limitations under the License. 
package ssa import ( + "bytes" "context" "encoding/json" + "time" "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/retry" + "k8s.io/klog/v2" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/internal/contract" ) const classicManager = "manager" -// DropManagedFields modifies the managedFields entries on the object that belong to "manager" (Operation=Update) -// to drop ownership of the given paths if there is no field yet that is managed by `ssaManager`. -// -// If we want to be able to drop fields that were previously owned by the "manager" we have to ensure that -// fields are not co-owned by "manager" and `ssaManager`. Otherwise, when we drop the fields with SSA -// (i.e. `ssaManager`) the fields would remain as they are still owned by "manager". -// The following code will do a one-time update on the managed fields. -// We won't do this on subsequent reconciles. This case will be identified by checking if `ssaManager` owns any fields. -// Dropping ownership in paths for existing "manager" entries (which could also be from other controllers) is safe, -// as we assume that if other controllers are still writing fields on the object they will just do it again and thus -// gain ownership again. -func DropManagedFields(ctx context.Context, c client.Client, obj client.Object, ssaManager string, paths []contract.Path) error { - // Return if `ssaManager` already owns any fields. - if hasFieldsManagedBy(obj, ssaManager) { +// RemoveManagedFieldsForLabelsAndAnnotations removes labels and annotations from managedFields entries. +// This func is e.g. used in the following context: +// * First a full object is created with fieldManager. +// * Then this func is called to drop ownership for labels and annotations +// * And then in a subsequent syncMachines call a metadataFieldManager can take ownership for labels and annotations. +// Note: This is done so that this func does not rely on managedFields being stored in the cache, so we can optimize +// memory usage by dropping managedFields before storing objects in the cache. +func RemoveManagedFieldsForLabelsAndAnnotations(ctx context.Context, c client.Client, apiReader client.Reader, object client.Object, fieldManager string) error { + objectKey := client.ObjectKeyFromObject(object) + objectGVK, err := apiutil.GVKForObject(object, c.Scheme()) + if err != nil { + return errors.Wrapf(err, "failed to remove managedFields for labels and annotations from object %s", + klog.KRef(objectKey.Namespace, objectKey.Name)) + } + + var getObjectFromAPIServer bool + err = retry.RetryOnConflict(wait.Backoff{ + Steps: 3, + Duration: 10 * time.Millisecond, + Factor: 1.0, + Jitter: 0.1, + }, func() error { + // First try is with the passed in object. After that get the object from the apiserver. + // Note: This is done so that this func does not rely on managedFields being stored in the cache, so we can optimize + // memory usage by dropping managedFields before storing objects in the cache. 
+ if getObjectFromAPIServer { + if err := apiReader.Get(ctx, objectKey, object); err != nil { + return err + } + } else { + getObjectFromAPIServer = true + } + + base := object.DeepCopyObject().(client.Object) + + // Modify managedFields for manager=fieldManager and operation=Apply to drop ownership for labels and annotations. + originalManagedFields := object.GetManagedFields() + managedFields := make([]metav1.ManagedFieldsEntry, 0, len(originalManagedFields)) + for _, managedField := range originalManagedFields { + if managedField.Manager == fieldManager && + managedField.Operation == metav1.ManagedFieldsOperationApply && + managedField.Subresource == "" { + // If fieldManager does not own labels and annotations there's nothing to do. + if !bytes.Contains(managedField.FieldsV1.Raw, []byte("f:metadata")) { + return nil + } + + // Unmarshal the managed fields into a map[string]interface{} + fieldsV1 := map[string]interface{}{} + if err := json.Unmarshal(managedField.FieldsV1.Raw, &fieldsV1); err != nil { + return errors.Wrap(err, "failed to unmarshal managed fields") + } + + // Filter out the ownership for labels and annotations. + deletedFields := FilterIntent(&FilterIntentInput{ + Path: contract.Path{}, + Value: fieldsV1, + ShouldFilter: IsPathIgnored([]contract.Path{ + {"f:metadata", "f:annotations"}, + {"f:metadata", "f:labels"}, + }), + }) + if !deletedFields { + // If fieldManager did not own labels and annotations there's nothing to do. + return nil + } + + fieldsV1Raw, err := json.Marshal(fieldsV1) + if err != nil { + return errors.Wrap(err, "failed to marshal managed fields") + } + managedField.FieldsV1.Raw = fieldsV1Raw + + managedFields = append(managedFields, managedField) + } else { + // Do not modify the entry. Use as is. + managedFields = append(managedFields, managedField) + } + } + object.SetManagedFields(managedFields) + + // Use optimistic locking to avoid accidentally rolling back managedFields. + return c.Patch(ctx, object, client.MergeFromWithOptions(base, client.MergeFromWithOptimisticLock{})) + }) + if err != nil { + return errors.Wrapf(err, "failed to remove managedFields for labels and annotations from %s %s", + objectGVK.Kind, klog.KRef(objectKey.Namespace, objectKey.Name)) + } + return nil +} + +// MigrateManagedFields migrates managedFields. +// ManagedFields are only migrated if fieldManager owns labels+annotations +// The migration deletes all non-status managedField entries for fieldManager:Apply and manager:Update. +// Additionally it adds a seed entry for metadataFieldManager. +// Note: We have to call this func for every Machine created with CAPI <= v1.11 once. +// Given that this was introduced in CAPI v1.12 and our n-3 upgrade policy this can +// be removed with CAPI v1.15. +func MigrateManagedFields(ctx context.Context, c client.Client, object client.Object, fieldManager, metadataFieldManager string) error { + objectKey := client.ObjectKeyFromObject(object) + objectGVK, err := apiutil.GVKForObject(object, c.Scheme()) + if err != nil { + return errors.Wrapf(err, "failed to migrate managedFields for object %s", + klog.KRef(objectKey.Namespace, objectKey.Name)) + } + + // Check if a migration is still needed. This should be only done once per object. 
+ needsMigration, err := needsMigration(object, fieldManager) + if err != nil { + return errors.Wrapf(err, "failed to migrate managedFields for %s %s", + objectGVK.Kind, klog.KRef(objectKey.Namespace, objectKey.Name)) + } + if !needsMigration { return nil } - // Since there is no field managed by `ssaManager` it means that - // this is the first time this object is being processed after the controller calling this function - // started to use SSA patches. - // It is required to clean-up managedFields from entries created by the regular patches. - // This will ensure that `ssaManager` will be able to modify the fields that - // were originally owned by "manager". - base := obj.DeepCopyObject().(client.Object) - - // Modify managedFieldEntry for manager=manager and operation=update to drop ownership - // for the given paths to avoid having two managers holding values. - originalManagedFields := obj.GetManagedFields() + base := object.DeepCopyObject().(client.Object) + + // Remove managedFields for fieldManager:Apply and manager:Update. + originalManagedFields := object.GetManagedFields() managedFields := make([]metav1.ManagedFieldsEntry, 0, len(originalManagedFields)) for _, managedField := range originalManagedFields { + if managedField.Manager == fieldManager && + managedField.Operation == metav1.ManagedFieldsOperationApply && + managedField.Subresource == "" { + continue + } if managedField.Manager == classicManager && - managedField.Operation == metav1.ManagedFieldsOperationUpdate { + managedField.Operation == metav1.ManagedFieldsOperationUpdate && + managedField.Subresource == "" { + continue + } + managedFields = append(managedFields, managedField) + } + + // Add a seeding managedField entry to prevent SSA to create/infer a default managedField entry when the + // first SSA is applied. + // If an existing object doesn't have managedFields when applying the first SSA the API server + // creates an entry with operation=Update (guessing where the object comes from), but this entry ends up + // acting as a co-ownership and we want to prevent this. + // NOTE: fieldV1Map cannot be empty, so we add metadata.name which will be cleaned up at the first + // SSA patch of the same manager (updateLabelsAndAnnotations directly after calling this func). + managedFields = append(managedFields, metav1.ManagedFieldsEntry{ + Manager: metadataFieldManager, + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: objectGVK.GroupVersion().String(), + Time: ptr.To(metav1.Now()), + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:metadata":{"f:name":{}}}`)}, + }) + + object.SetManagedFields(managedFields) + + // Use optimistic locking to avoid accidentally rolling back managedFields. + if err := c.Patch(ctx, object, client.MergeFromWithOptions(base, client.MergeFromWithOptimisticLock{})); err != nil { + return errors.Wrapf(err, "failed to migrate managedFields for %s %s", + objectGVK.Kind, klog.KRef(objectKey.Namespace, objectKey.Name)) + } + return nil +} + +// needsMigration returns true if fieldManager:Apply owns the clusterv1.ClusterNameLabel. 
+func needsMigration(object client.Object, fieldManager string) (bool, error) { + for _, managedField := range object.GetManagedFields() { + //nolint:gocritic // much easier to read this way, not going to use continue instead + if managedField.Manager == fieldManager && + managedField.Operation == metav1.ManagedFieldsOperationApply && + managedField.Subresource == "" { + // If fieldManager does not own the cluster-name label migration is not needed + if !bytes.Contains(managedField.FieldsV1.Raw, []byte("f:cluster.x-k8s.io/cluster-name")) { + return false, nil + } + // Unmarshal the managed fields into a map[string]interface{} fieldsV1 := map[string]interface{}{} if err := json.Unmarshal(managedField.FieldsV1.Raw, &fieldsV1); err != nil { - return errors.Wrap(err, "failed to unmarshal managed fields") + return false, errors.Wrap(err, "failed to determine if migration is needed: failed to unmarshal managed fields") } - // Filter out the ownership for the given paths. - FilterIntent(&FilterIntentInput{ - Path: contract.Path{}, - Value: fieldsV1, - ShouldFilter: IsPathIgnored(paths), - }) - - fieldsV1Raw, err := json.Marshal(fieldsV1) + // Note: MigrateManagedFields is only called for BootstrapConfig/InfraMachine and both always have the cluster-name label set. + _, containsClusterNameLabel, err := unstructured.NestedFieldNoCopy(fieldsV1, "f:metadata", "f:labels", "f:cluster.x-k8s.io/cluster-name") if err != nil { - return errors.Wrap(err, "failed to marshal managed fields") + return false, errors.Wrapf(err, "failed to determine if migration is needed: failed to determine if managed field contains the %s label", clusterv1.ClusterNameLabel) } - managedField.FieldsV1.Raw = fieldsV1Raw - - managedFields = append(managedFields, managedField) - } else { - // Do not modify the entry. Use as is. - managedFields = append(managedFields, managedField) - } - } - - obj.SetManagedFields(managedFields) - - return c.Patch(ctx, obj, client.MergeFrom(base)) -} - -// hasFieldsManagedBy returns true if any of the fields in obj are managed by manager. -func hasFieldsManagedBy(obj client.Object, manager string) bool { - managedFields := obj.GetManagedFields() - for _, mf := range managedFields { - if mf.Manager == manager { - return true + return containsClusterNameLabel, nil } } - return false + return false, nil } diff --git a/internal/util/ssa/managedfields_test.go b/internal/util/ssa/managedfields_test.go index 24ea8eabad62..e78973b62404 100644 --- a/internal/util/ssa/managedfields_test.go +++ b/internal/util/ssa/managedfields_test.go @@ -19,251 +19,587 @@ package ssa import ( "context" - "encoding/json" + "strings" "testing" . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/cluster-api/internal/contract" + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) -func TestDropManagedFields(t *testing.T) { - ctx := context.Background() - - ssaManager := "ssa-manager" +func TestRemoveManagedFieldsForLabelsAndAnnotations(t *testing.T) { + testFieldManager := "ssa-manager" + kubeadmConfig := &bootstrapv1.KubeadmConfig{ + // Have to set TypeMeta explicitly when using SSA with typed objects. 
+ TypeMeta: metav1.TypeMeta{ + APIVersion: bootstrapv1.GroupVersion.String(), + Kind: "KubeadmConfig", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "kubeadmconfig-1", + Namespace: "default", + Finalizers: []string{"test.com/finalizer"}, + }, + Spec: bootstrapv1.KubeadmConfigSpec{ + Format: bootstrapv1.CloudConfig, + }, + } + kubeadmConfigWithoutFinalizer := kubeadmConfig.DeepCopy() + kubeadmConfigWithoutFinalizer.Finalizers = nil + kubeadmConfigWithLabelsAndAnnotations := kubeadmConfig.DeepCopy() + kubeadmConfigWithLabelsAndAnnotations.Labels = map[string]string{ + "label-1": "value-1", + } + kubeadmConfigWithLabelsAndAnnotations.Annotations = map[string]string{ + "annotation-1": "value-1", + } tests := []struct { - name string - updateManager string - obj client.Object - wantOwnershipToDrop bool + name string + kubeadmConfig *bootstrapv1.KubeadmConfig + statusWriteAfterCreate bool + expectedManagedFieldsAfterCreate []managedFieldEntry + expectedManagedFieldsAfterStatusWrite []managedFieldEntry + expectedManagedFieldsAfterRemoval []managedFieldEntry }{ { - name: "should drop ownership of fields if there is no entry for ssaManager", - wantOwnershipToDrop: true, + name: "no-op if there are no managedFields for labels and annotations", + // Note: This case should never happen, but using it to test the no-op code path. + kubeadmConfig: kubeadmConfig.DeepCopy(), + // Note: After create testFieldManager should own all fields. + expectedManagedFieldsAfterCreate: []managedFieldEntry{{ + Manager: testFieldManager, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:finalizers":{ + "v:\"test.com/finalizer\"":{} + } +}, +"f:spec":{ + "f:format":{} +}}`, + }}, + // Note: Expect no change. + expectedManagedFieldsAfterRemoval: []managedFieldEntry{{ + Manager: testFieldManager, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:finalizers":{ + "v:\"test.com/finalizer\"":{} + } +}, +"f:spec":{ + "f:format":{} +}}`, + }}, + }, + { + name: "no-op if there are no managedFields for labels and annotations (not even metadata)", + // Note: This case should never happen, but using it to test the no-op code path. + kubeadmConfig: kubeadmConfigWithoutFinalizer.DeepCopy(), + // Note: After create testFieldManager should own all fields. + expectedManagedFieldsAfterCreate: []managedFieldEntry{{ + Manager: testFieldManager, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:spec":{ + "f:format":{} +}}`, + }}, + // Note: Expect no change. + expectedManagedFieldsAfterRemoval: []managedFieldEntry{{ + Manager: testFieldManager, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:spec":{ + "f:format":{} +}}`, + }}, }, { - name: "should not drop ownership of fields if there is an entry for ssaManager", - updateManager: ssaManager, - wantOwnershipToDrop: false, + name: "Remove managedFields for labels and annotations", + kubeadmConfig: kubeadmConfigWithLabelsAndAnnotations.DeepCopy(), + // Note: After create testFieldManager should own all fields. + expectedManagedFieldsAfterCreate: []managedFieldEntry{{ + Manager: testFieldManager, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:annotations":{ + "f:annotation-1":{} + }, + "f:finalizers":{ + "v:\"test.com/finalizer\"":{} + }, + "f:labels":{ + "f:label-1":{} + } +}, +"f:spec":{ + "f:format":{} +}}`, + }}, + // Note: After removal testFieldManager should not own labels and annotations anymore. 
+ expectedManagedFieldsAfterRemoval: []managedFieldEntry{{ + Manager: testFieldManager, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:finalizers":{ + "v:\"test.com/finalizer\"":{} + } +}, +"f:spec":{ + "f:format":{} +}}`, + }}, + }, + { + name: "Remove managedFields for labels and annotations, preserve status fields and retry on conflict", + kubeadmConfig: kubeadmConfigWithLabelsAndAnnotations.DeepCopy(), + statusWriteAfterCreate: true, + // Note: After create testFieldManager should own all fields. + expectedManagedFieldsAfterCreate: []managedFieldEntry{{ + Manager: testFieldManager, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:annotations":{ + "f:annotation-1":{} + }, + "f:finalizers":{ + "v:\"test.com/finalizer\"":{} + }, + "f:labels":{ + "f:label-1":{} + } +}, +"f:spec":{ + "f:format":{} +}}`, + }}, + // Note: After status write there is an additional entry for status. + expectedManagedFieldsAfterStatusWrite: []managedFieldEntry{{ + Manager: testFieldManager, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:annotations":{ + "f:annotation-1":{} + }, + "f:finalizers":{ + "v:\"test.com/finalizer\"":{} + }, + "f:labels":{ + "f:label-1":{} + } +}, +"f:spec":{ + "f:format":{} +}}`, + }, { + Manager: classicManager, + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsV1: `{ +"f:status":{ + ".":{}, + "f:initialization":{ + ".":{}, + "f:dataSecretCreated":{} + } +}}`, + Subresource: "status", + }}, + // Note: After removal testFieldManager should not own labels and annotations anymore + // additionally the status entry is preserved and because of the status write + // the client will use the retry on conflict code path. + expectedManagedFieldsAfterRemoval: []managedFieldEntry{{ + Manager: testFieldManager, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:finalizers":{ + "v:\"test.com/finalizer\"":{} + } +}, +"f:spec":{ + "f:format":{} +}}`, + }, { + Manager: classicManager, + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsV1: `{ +"f:status":{ + ".":{}, + "f:initialization":{ + ".":{}, + "f:dataSecretCreated":{} + } +}}`, + Subresource: "status", + }}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + ctx := t.Context() g := NewWithT(t) - createCM := &corev1.ConfigMap{ - // Have to set TypeMeta explicitly when using SSA with typed objects. - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "ConfigMap", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "cm-1", - Namespace: "default", - Labels: map[string]string{ - "label-1": "value-1", - }, - Annotations: map[string]string{ - "annotation-1": "value-1", - }, - Finalizers: []string{"test.com/finalizer"}, - }, - Data: map[string]string{ - "test-key": "test-value", - }, - } - updateCM := createCM.DeepCopy() - updateCM.Data["test-key-update"] = "test-value-update" - - g.Expect(env.Client.Create(ctx, createCM, client.FieldOwner(classicManager))).To(Succeed()) + // Note: This func is called exactly the same way as it is called when creating BootstrapConfig/InfraMachine in KCP/MS. 
+ g.Expect(Patch(ctx, env.Client, testFieldManager, tt.kubeadmConfig)).To(Succeed()) t.Cleanup(func() { - createCMWithoutFinalizer := createCM.DeepCopy() - createCMWithoutFinalizer.ObjectMeta.Finalizers = []string{} - g.Expect(env.Client.Patch(ctx, createCMWithoutFinalizer, client.MergeFrom(createCM))).To(Succeed()) - g.Expect(env.CleanupAndWait(ctx, createCM)).To(Succeed()) + ctx := context.Background() + objWithoutFinalizer := tt.kubeadmConfig.DeepCopyObject().(client.Object) + objWithoutFinalizer.SetFinalizers([]string{}) + g.Expect(env.Client.Patch(ctx, objWithoutFinalizer, client.MergeFrom(tt.kubeadmConfig))).To(Succeed()) + g.Expect(env.CleanupAndWait(ctx, tt.kubeadmConfig)).To(Succeed()) }) + g.Expect(cleanupTime(tt.kubeadmConfig.GetManagedFields())).To(BeComparableTo(toManagedFields(tt.expectedManagedFieldsAfterCreate))) - if tt.updateManager != "" { - // If updateManager is set, update the object with SSA - g.Expect(env.Client.Patch(ctx, updateCM, client.Apply, client.FieldOwner(tt.updateManager))).To(Succeed()) - } - - gotObj := createCM.DeepCopyObject().(client.Object) - g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(createCM), gotObj)).To(Succeed()) - labelsAndAnnotationsManagedFieldPaths := []contract.Path{ - {"f:metadata", "f:annotations"}, - {"f:metadata", "f:labels"}, + if tt.statusWriteAfterCreate { + // Ensure we don't update resourceVersion in tt.kubeadmConfig so RemoveManagedFieldsForLabelsAndAnnotations + // below encounters a conflict and uses the retry on conflict code path. + kubeadmConfig := tt.kubeadmConfig.DeepCopyObject().(*bootstrapv1.KubeadmConfig) + origKubeadmConfig := kubeadmConfig.DeepCopyObject().(*bootstrapv1.KubeadmConfig) + kubeadmConfig.Status.Initialization.DataSecretCreated = ptr.To(true) + g.Expect(env.Client.Status().Patch(ctx, kubeadmConfig, client.MergeFrom(origKubeadmConfig), client.FieldOwner(classicManager))).To(Succeed()) + g.Expect(cleanupTime(kubeadmConfig.GetManagedFields())).To(BeComparableTo(toManagedFields(tt.expectedManagedFieldsAfterStatusWrite))) } - g.Expect(DropManagedFields(ctx, env.Client, gotObj, ssaManager, labelsAndAnnotationsManagedFieldPaths)).To(Succeed()) - if tt.wantOwnershipToDrop { - g.Expect(gotObj.GetManagedFields()).ShouldNot(MatchFieldOwnership( - classicManager, - metav1.ManagedFieldsOperationUpdate, - contract.Path{"f:metadata", "f:labels"}, - )) - g.Expect(gotObj.GetManagedFields()).ShouldNot(MatchFieldOwnership( - classicManager, - metav1.ManagedFieldsOperationUpdate, - contract.Path{"f:metadata", "f:annotations"}, - )) - } else { - g.Expect(gotObj.GetManagedFields()).Should(MatchFieldOwnership( - classicManager, - metav1.ManagedFieldsOperationUpdate, - contract.Path{"f:metadata", "f:labels"}, - )) - g.Expect(gotObj.GetManagedFields()).Should(MatchFieldOwnership( - classicManager, - metav1.ManagedFieldsOperationUpdate, - contract.Path{"f:metadata", "f:annotations"}, - )) - } - // Verify ownership of other fields is not affected. - g.Expect(gotObj.GetManagedFields()).Should(MatchFieldOwnership( - classicManager, - metav1.ManagedFieldsOperationUpdate, - contract.Path{"f:metadata", "f:finalizers"}, - )) + // Note: This func is called exactly the same way as it is called when creating BootstrapConfig/InfraMachine in KCP/MS. 
+ g.Expect(RemoveManagedFieldsForLabelsAndAnnotations(ctx, env.Client, env.GetAPIReader(), tt.kubeadmConfig, testFieldManager)).To(Succeed()) + g.Expect(cleanupTime(tt.kubeadmConfig.GetManagedFields())).To(BeComparableTo(toManagedFields(tt.expectedManagedFieldsAfterRemoval))) }) } } -func TestDropManagedFieldsWithFakeClient(t *testing.T) { - ctx := context.Background() - - ssaManager := "ssa-manager" - - fieldV1Map := map[string]interface{}{ - "f:metadata": map[string]interface{}{ - "f:name": map[string]interface{}{}, - "f:labels": map[string]interface{}{}, - "f:annotations": map[string]interface{}{}, - "f:finalizers": map[string]interface{}{}, +func TestMigrateManagedFields(t *testing.T) { + testFieldManager := "ssa-manager" + testMetadataFieldManager := "ssa-manager-metadata" + kubeadmConfig := &bootstrapv1.KubeadmConfig{ + // Have to set TypeMeta explicitly when using SSA with typed objects. + TypeMeta: metav1.TypeMeta{ + APIVersion: bootstrapv1.GroupVersion.String(), + Kind: "KubeadmConfig", }, - } - fieldV1, err := json.Marshal(fieldV1Map) - if err != nil { - panic(err) - } - - objWithoutSSAManager := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: "cm-1", - Namespace: "default", - ManagedFields: []metav1.ManagedFieldsEntry{{ - Manager: classicManager, - Operation: metav1.ManagedFieldsOperationUpdate, - FieldsType: "FieldsV1", - FieldsV1: &metav1.FieldsV1{Raw: fieldV1}, - APIVersion: "v1", - }}, - Labels: map[string]string{ - "label-1": "value-1", - }, - Annotations: map[string]string{ - "annotation-1": "value-1", - }, - Finalizers: []string{"test-finalizer"}, + Name: "kubeadmconfig-1", + Namespace: "default", + Finalizers: []string{"test.com/finalizer"}, }, - Data: map[string]string{ - "test-key": "test-value", + Spec: bootstrapv1.KubeadmConfigSpec{ + Format: bootstrapv1.CloudConfig, }, } - objectWithSSAManager := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cm-2", - Namespace: "default", - ManagedFields: []metav1.ManagedFieldsEntry{ - { - Manager: classicManager, - Operation: metav1.ManagedFieldsOperationUpdate, - FieldsType: "FieldsV1", - FieldsV1: &metav1.FieldsV1{Raw: fieldV1}, - APIVersion: "v1", - }, - { - Manager: ssaManager, - Operation: metav1.ManagedFieldsOperationApply, - FieldsType: "FieldsV1", - APIVersion: "v1", - }, - }, - Labels: map[string]string{ + tests := []struct { + name string + kubeadmConfig *bootstrapv1.KubeadmConfig + labels map[string]string + annotations map[string]string + statusWriteAfterCreate bool + updateLabelsAndAnnotationsAfterMigration bool + expectedManagedFieldsAfterCreate []managedFieldEntry + expectedManagedFieldsAfterStatusWrite []managedFieldEntry + expectedManagedFieldsAfterMigration []managedFieldEntry + expectedManagedFieldsAfterUpdatingLabelsAndAnnotations []managedFieldEntry + }{ + { + name: "no-op if there are no managedFields for the ClusterNameLabel", + // Note: This case should never happen, but using it to test the ClusterNameLabel check. 
+ kubeadmConfig: kubeadmConfig.DeepCopy(), + labels: map[string]string{ "label-1": "value-1", }, - Annotations: map[string]string{ + annotations: map[string]string{ "annotation-1": "value-1", }, - Finalizers: []string{"test-finalizer"}, - }, - Data: map[string]string{ - "test-key": "test-value", - }, + expectedManagedFieldsAfterCreate: []managedFieldEntry{{ + Manager: testFieldManager, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:annotations":{ + "f:annotation-1":{} + }, + "f:labels":{ + "f:label-1":{} } - - tests := []struct { - name string - obj client.Object - wantOwnershipToDrop bool - }{ - { - name: "should drop ownership of fields if there is no entry for ssaManager", - obj: objWithoutSSAManager, - wantOwnershipToDrop: true, +}}`, + }, { + Manager: classicManager, + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsV1: `{ +"f:metadata":{ + "f:finalizers":{ + ".":{}, + "v:\"test.com/finalizer\"":{} + } +}, +"f:spec":{ + ".":{}, + "f:format":{} +}}`, + }}, + // Note: Expect no change. + expectedManagedFieldsAfterMigration: []managedFieldEntry{{ + Manager: testFieldManager, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:annotations":{ + "f:annotation-1":{} + }, + "f:labels":{ + "f:label-1":{} + } +}}`, + }, { + Manager: classicManager, + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsV1: `{ +"f:metadata":{ + "f:finalizers":{ + ".":{}, + "v:\"test.com/finalizer\"":{} + } +}, +"f:spec":{ + ".":{}, + "f:format":{} +}}`, + }}, }, { - name: "should not drop ownership of fields if there is an entry for ssaManager", - obj: objectWithSSAManager, - wantOwnershipToDrop: false, + name: "Remove managedFields if there are managedFields for the ClusterNameLabel and preserve status", + kubeadmConfig: kubeadmConfig.DeepCopy(), + labels: map[string]string{ + "label-1": "value-1", + clusterv1.ClusterNameLabel: "cluster-1", + }, + annotations: map[string]string{ + "annotation-1": "value-1", + }, + statusWriteAfterCreate: true, + updateLabelsAndAnnotationsAfterMigration: true, + // Note: After create testFieldManager should own labels and annotations and + // manager should own everything else (same as in CAPI <= v1.11). + expectedManagedFieldsAfterCreate: []managedFieldEntry{{ + Manager: testFieldManager, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:annotations":{ + "f:annotation-1":{} + }, + "f:labels":{ + "f:cluster.x-k8s.io/cluster-name":{}, + "f:label-1":{} + } +}}`, + }, { + Manager: classicManager, + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsV1: `{ +"f:metadata":{ + "f:finalizers":{ + ".":{}, + "v:\"test.com/finalizer\"":{} + } +}, +"f:spec":{ + ".":{}, + "f:format":{} +}}`, + }}, + // Note: After status write there is an additional entry for status. 
+ expectedManagedFieldsAfterStatusWrite: []managedFieldEntry{{ + Manager: testFieldManager, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:annotations":{ + "f:annotation-1":{} + }, + "f:labels":{ + "f:cluster.x-k8s.io/cluster-name":{}, + "f:label-1":{} + } +}}`, + }, { + Manager: classicManager, + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsV1: `{ +"f:metadata":{ + "f:finalizers":{ + ".":{}, + "v:\"test.com/finalizer\"":{} + } +}, +"f:spec":{ + ".":{}, + "f:format":{} +}}`, + }, { + Manager: classicManager, + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsV1: `{ +"f:status":{ + ".":{}, + "f:initialization":{ + ".":{}, + "f:dataSecretCreated":{} + } +}}`, + Subresource: "status", + }}, + // Note: After migration the testMetadataFieldManager should have a seed entry, spec and annotation + // fields are orphaned and the status entry should be preserved + expectedManagedFieldsAfterMigration: []managedFieldEntry{{ + Manager: testMetadataFieldManager, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:name":{} +}}`, + }, { + Manager: classicManager, + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsV1: `{ +"f:status":{ + ".":{}, + "f:initialization":{ + ".":{}, + "f:dataSecretCreated":{} + } +}}`, + Subresource: "status", + }}, + // Note: After updating labels and annotations testMetadataFieldManager has a new entry for + // them (replacing the seed entry). + expectedManagedFieldsAfterUpdatingLabelsAndAnnotations: []managedFieldEntry{{ + Manager: testMetadataFieldManager, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:annotations":{ + "f:annotation-1":{} + }, + "f:labels":{ + "f:cluster.x-k8s.io/cluster-name":{}, + "f:label-1":{} + } +}}`, + }, { + Manager: classicManager, + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsV1: `{ +"f:status":{ + ".":{}, + "f:initialization":{ + ".":{}, + "f:dataSecretCreated":{} + } +}}`, + Subresource: "status", + }}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + ctx := t.Context() g := NewWithT(t) - fakeClient := fake.NewClientBuilder().WithObjects(tt.obj).WithReturnManagedFields().Build() - labelsAndAnnotationsManagedFieldPaths := []contract.Path{ - {"f:metadata", "f:annotations"}, - {"f:metadata", "f:labels"}, + + // Note: Create the object like it was created it in CAPI <= v1.11 (with manager). + g.Expect(env.Client.Create(ctx, tt.kubeadmConfig, client.FieldOwner(classicManager))).To(Succeed()) + t.Cleanup(func() { + ctx := context.Background() + objWithoutFinalizer := tt.kubeadmConfig.DeepCopyObject().(client.Object) + objWithoutFinalizer.SetFinalizers([]string{}) + g.Expect(env.Client.Patch(ctx, objWithoutFinalizer, client.MergeFrom(tt.kubeadmConfig))).To(Succeed()) + g.Expect(env.CleanupAndWait(ctx, tt.kubeadmConfig)).To(Succeed()) + }) + + // Note: Update labels and annotations like in CAPI <= v1.11 (with the "main" fieldManager). 
+ updatedObject := &unstructured.Unstructured{} + updatedObject.SetGroupVersionKind(bootstrapv1.GroupVersion.WithKind("KubeadmConfig")) + updatedObject.SetNamespace(tt.kubeadmConfig.GetNamespace()) + updatedObject.SetName(tt.kubeadmConfig.GetName()) + updatedObject.SetUID(tt.kubeadmConfig.GetUID()) + updatedObject.SetLabels(tt.labels) + updatedObject.SetAnnotations(tt.annotations) + g.Expect(Patch(ctx, env.Client, testFieldManager, updatedObject)).To(Succeed()) + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(tt.kubeadmConfig), tt.kubeadmConfig)).To(Succeed()) + g.Expect(cleanupTime(tt.kubeadmConfig.GetManagedFields())).To(BeComparableTo(toManagedFields(tt.expectedManagedFieldsAfterCreate))) + + if tt.statusWriteAfterCreate { + origKubeadmConfig := tt.kubeadmConfig.DeepCopyObject().(*bootstrapv1.KubeadmConfig) + tt.kubeadmConfig.Status.Initialization.DataSecretCreated = ptr.To(true) + g.Expect(env.Client.Status().Patch(ctx, tt.kubeadmConfig, client.MergeFrom(origKubeadmConfig), client.FieldOwner(classicManager))).To(Succeed()) + g.Expect(cleanupTime(tt.kubeadmConfig.GetManagedFields())).To(BeComparableTo(toManagedFields(tt.expectedManagedFieldsAfterStatusWrite))) } - g.Expect(DropManagedFields(ctx, fakeClient, tt.obj, ssaManager, labelsAndAnnotationsManagedFieldPaths)).To(Succeed()) - if tt.wantOwnershipToDrop { - g.Expect(tt.obj.GetManagedFields()).ShouldNot(MatchFieldOwnership( - classicManager, - metav1.ManagedFieldsOperationUpdate, - contract.Path{"f:metadata", "f:labels"}, - )) - g.Expect(tt.obj.GetManagedFields()).ShouldNot(MatchFieldOwnership( - classicManager, - metav1.ManagedFieldsOperationUpdate, - contract.Path{"f:metadata", "f:annotations"}, - )) - } else { - g.Expect(tt.obj.GetManagedFields()).Should(MatchFieldOwnership( - classicManager, - metav1.ManagedFieldsOperationUpdate, - contract.Path{"f:metadata", "f:labels"}, - )) - g.Expect(tt.obj.GetManagedFields()).Should(MatchFieldOwnership( - classicManager, - metav1.ManagedFieldsOperationUpdate, - contract.Path{"f:metadata", "f:annotations"}, - )) + + // Note: This func is called exactly the same way as it is called in syncMachines in KCP/MS. + g.Expect(MigrateManagedFields(ctx, env.Client, tt.kubeadmConfig, testFieldManager, testMetadataFieldManager)).To(Succeed()) + g.Expect(cleanupTime(tt.kubeadmConfig.GetManagedFields())).To(BeComparableTo(toManagedFields(tt.expectedManagedFieldsAfterMigration))) + + if tt.updateLabelsAndAnnotationsAfterMigration { + // Note: Update labels and annotations exactly the same way as it is done in syncMachines in KCP/MS. + updatedObject := &unstructured.Unstructured{} + updatedObject.SetGroupVersionKind(bootstrapv1.GroupVersion.WithKind("KubeadmConfig")) + updatedObject.SetNamespace(tt.kubeadmConfig.GetNamespace()) + updatedObject.SetName(tt.kubeadmConfig.GetName()) + updatedObject.SetUID(tt.kubeadmConfig.GetUID()) + updatedObject.SetLabels(tt.labels) + updatedObject.SetAnnotations(tt.annotations) + g.Expect(Patch(ctx, env.Client, testMetadataFieldManager, updatedObject)).To(Succeed()) + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(tt.kubeadmConfig), tt.kubeadmConfig)).To(Succeed()) + g.Expect(cleanupTime(tt.kubeadmConfig.GetManagedFields())).To(BeComparableTo(toManagedFields(tt.expectedManagedFieldsAfterUpdatingLabelsAndAnnotations))) } - // Verify ownership of other fields is not affected. 
- g.Expect(tt.obj.GetManagedFields()).Should(MatchFieldOwnership( - classicManager, - metav1.ManagedFieldsOperationUpdate, - contract.Path{"f:metadata", "f:finalizers"}, - )) }) } } + +func cleanupTime(fields []metav1.ManagedFieldsEntry) []metav1.ManagedFieldsEntry { + for i := range fields { + fields[i].Time = nil + } + return fields +} + +type managedFieldEntry struct { + Manager string + Operation metav1.ManagedFieldsOperationType + FieldsV1 string + Subresource string +} + +func toManagedFields(managedFields []managedFieldEntry) []metav1.ManagedFieldsEntry { + res := []metav1.ManagedFieldsEntry{} + for _, f := range managedFields { + res = append(res, metav1.ManagedFieldsEntry{ + Manager: f.Manager, + Operation: f.Operation, + APIVersion: bootstrapv1.GroupVersion.String(), + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(trimSpaces(f.FieldsV1))}, + Subresource: f.Subresource, + }) + } + return res +} + +func trimSpaces(s string) string { + s = strings.ReplaceAll(s, "\n", "") + s = strings.ReplaceAll(s, "\t", "") + return s +} diff --git a/internal/util/ssa/matchers.go b/internal/util/ssa/matchers.go deleted file mode 100644 index 93dd8e114782..000000000000 --- a/internal/util/ssa/matchers.go +++ /dev/null @@ -1,113 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package ssa - -import ( - "encoding/json" - "fmt" - - "github.com/onsi/gomega/types" - "github.com/pkg/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "sigs.k8s.io/cluster-api/internal/contract" -) - -// MatchManagedFieldsEntry is a gomega Matcher to check if a ManagedFieldsEntry has the given name and operation. 
-func MatchManagedFieldsEntry(manager string, operation metav1.ManagedFieldsOperationType) types.GomegaMatcher { - return &managedFieldMatcher{ - manager: manager, - operation: operation, - } -} - -type managedFieldMatcher struct { - manager string - operation metav1.ManagedFieldsOperationType -} - -func (mf *managedFieldMatcher) Match(actual interface{}) (bool, error) { - managedFieldsEntry, ok := actual.(metav1.ManagedFieldsEntry) - if !ok { - return false, fmt.Errorf("expecting metav1.ManagedFieldsEntry got %T", actual) - } - - return managedFieldsEntry.Manager == mf.manager && managedFieldsEntry.Operation == mf.operation, nil -} - -func (mf *managedFieldMatcher) FailureMessage(actual interface{}) string { - managedFieldsEntry := actual.(metav1.ManagedFieldsEntry) - return fmt.Sprintf("Expected ManagedFieldsEntry to match Manager:%s and Operation:%s, got Manager:%s, Operation:%s", - mf.manager, mf.operation, managedFieldsEntry.Manager, managedFieldsEntry.Operation) -} - -func (mf *managedFieldMatcher) NegatedFailureMessage(actual interface{}) string { - managedFieldsEntry := actual.(metav1.ManagedFieldsEntry) - return fmt.Sprintf("Expected ManagedFieldsEntry to not match Manager:%s and Operation:%s, got Manager:%s, Operation:%s", - mf.manager, mf.operation, managedFieldsEntry.Manager, managedFieldsEntry.Operation) -} - -// MatchFieldOwnership is a gomega Matcher to check if path is owned by the given manager and operation. -// Note: The path has to be specified as is observed in managed fields. Example: to check if the labels are owned -// by the correct manager the correct way to pass the path is contract.Path{"f:metadata","f:labels"}. -func MatchFieldOwnership(manager string, operation metav1.ManagedFieldsOperationType, path contract.Path) types.GomegaMatcher { - return &fieldOwnershipMatcher{ - path: path, - manager: manager, - operation: operation, - } -} - -type fieldOwnershipMatcher struct { - path contract.Path - manager string - operation metav1.ManagedFieldsOperationType -} - -func (fom *fieldOwnershipMatcher) Match(actual interface{}) (bool, error) { - managedFields, ok := actual.([]metav1.ManagedFieldsEntry) - if !ok { - return false, fmt.Errorf("expecting []metav1.ManagedFieldsEntry got %T", actual) - } - for _, managedFieldsEntry := range managedFields { - if managedFieldsEntry.Manager == fom.manager && managedFieldsEntry.Operation == fom.operation { - fieldsV1 := map[string]interface{}{} - if err := json.Unmarshal(managedFieldsEntry.FieldsV1.Raw, &fieldsV1); err != nil { - return false, errors.Wrap(err, "failed to parse managedFieldsEntry.FieldsV1") - } - FilterIntent(&FilterIntentInput{ - Path: contract.Path{}, - Value: fieldsV1, - ShouldFilter: IsPathNotAllowed([]contract.Path{fom.path}), - }) - return len(fieldsV1) > 0, nil - } - } - return false, nil -} - -func (fom *fieldOwnershipMatcher) FailureMessage(actual interface{}) string { - managedFields := actual.([]metav1.ManagedFieldsEntry) - return fmt.Sprintf("Expected Path %s to be owned by Manager:%s and Operation:%s, did not find correct ownership: %s", - fom.path, fom.manager, fom.operation, managedFields) -} - -func (fom *fieldOwnershipMatcher) NegatedFailureMessage(actual interface{}) string { - managedFields := actual.([]metav1.ManagedFieldsEntry) - return fmt.Sprintf("Expected Path %s to not be owned by Manager:%s and Operation:%s, did not find correct ownership: %s", - fom.path, fom.manager, fom.operation, managedFields) -}
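
Editor's note (not part of the diff): the sketch below shows, under stated assumptions, how the two new helpers are meant to be chained by a caller such as syncMachines: a one-time MigrateManagedFields for objects created with the old managedFields layout, followed by an SSA patch of labels and annotations with the metadata field manager, which replaces the seeding {"f:metadata":{"f:name":{}}} entry. The function name reconcileMetadata and the two manager strings are hypothetical; only the ssa.MigrateManagedFields and ssa.Patch call shapes are taken from this diff.

// Illustrative sketch only; reconcileMetadata and the manager names are hypothetical.
package example

import (
	"context"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"sigs.k8s.io/cluster-api/internal/util/ssa"
)

func reconcileMetadata(ctx context.Context, c client.Client, obj client.Object, labels, annotations map[string]string) error {
	const (
		manager         = "example-manager"          // hypothetical; owns everything except labels/annotations going forward
		metadataManager = "example-manager-metadata" // hypothetical; owns labels/annotations going forward
	)

	// One-time migration for objects created with the old managedFields layout.
	// This is a no-op if manager:Apply does not own the cluster-name label.
	if err := ssa.MigrateManagedFields(ctx, c, obj, manager, metadataManager); err != nil {
		return err
	}

	// First SSA patch with metadataManager; this replaces the seeding
	// {"f:metadata":{"f:name":{}}} entry added by MigrateManagedFields.
	desired := &unstructured.Unstructured{}
	desired.SetGroupVersionKind(obj.GetObjectKind().GroupVersionKind())
	desired.SetNamespace(obj.GetNamespace())
	desired.SetName(obj.GetName())
	desired.SetUID(obj.GetUID())
	desired.SetLabels(labels)
	desired.SetAnnotations(annotations)
	return ssa.Patch(ctx, c, metadataManager, desired)
}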