diff --git a/api/core/v1beta1/conversion.go b/api/core/v1beta1/conversion.go
index b7d2a57dc148..0bc356ce993c 100644
--- a/api/core/v1beta1/conversion.go
+++ b/api/core/v1beta1/conversion.go
@@ -408,6 +408,10 @@ func (src *Machine) ConvertTo(dstRaw conversion.Hub) error {
 	// Recover other values.
 	if ok {
 		dst.Spec.MinReadySeconds = restored.Spec.MinReadySeconds
+		// Restore the phase, this also means that any client using v1beta1 during a round-trip
+		// won't be able to write the Phase field. But that's okay as the only client writing the Phase
+		// field should be the Machine controller.
+		dst.Status.Phase = restored.Status.Phase
 	}
 
 	return nil
@@ -1653,6 +1657,13 @@ func Convert_v1beta2_MachineStatus_To_v1beta1_MachineStatus(in *clusterv1.Machin
 	if err := autoConvert_v1beta2_MachineStatus_To_v1beta1_MachineStatus(in, out, s); err != nil {
 		return err
 	}
+
+	// Convert v1beta2 Updating phase to v1beta1 Running as Updating did not exist in v1beta1.
+	// We don't have to support a round-trip as only the core CAPI controller should write the Phase field.
+	if out.Phase == "Updating" {
+		out.Phase = "Running"
+	}
+
 	if !reflect.DeepEqual(in.LastUpdated, metav1.Time{}) {
 		out.LastUpdated = ptr.To(in.LastUpdated)
 	}
diff --git a/api/core/v1beta1/conversion_test.go b/api/core/v1beta1/conversion_test.go
index 10d557048460..172f76e186fb 100644
--- a/api/core/v1beta1/conversion_test.go
+++ b/api/core/v1beta1/conversion_test.go
@@ -514,6 +514,9 @@ func hubMachineSpec(in *clusterv1.MachineSpec, c randfill.Continue) {
 
 func hubMachineStatus(in *clusterv1.MachineStatus, c randfill.Continue) {
 	c.FillNoCustom(in)
+
+	in.Phase = []string{"Updating", "Running"}[c.Intn(2)]
+
 	// Drop empty structs with only omit empty fields.
 	if in.Deprecated != nil {
 		if in.Deprecated.V1Beta1 == nil || reflect.DeepEqual(in.Deprecated.V1Beta1, &clusterv1.MachineV1Beta1DeprecatedStatus{}) {
diff --git a/api/core/v1beta2/machine_phase_types.go b/api/core/v1beta2/machine_phase_types.go
index 1ca955156444..b849ea61d654 100644
--- a/api/core/v1beta2/machine_phase_types.go
+++ b/api/core/v1beta2/machine_phase_types.go
@@ -45,6 +45,10 @@ const (
 	// become a Kubernetes Node in a Ready state.
 	MachinePhaseRunning = MachinePhase("Running")
 
+	// MachinePhaseUpdating is the Machine state when the Machine
+	// is updating.
+	MachinePhaseUpdating = MachinePhase("Updating")
+
 	// MachinePhaseDeleting is the Machine state when a delete
 	// request has been sent to the API Server,
 	// but its infrastructure has not yet been fully deleted.
diff --git a/api/core/v1beta2/machine_types.go b/api/core/v1beta2/machine_types.go
index 7e752f65a733..78f0b1fab487 100644
--- a/api/core/v1beta2/machine_types.go
+++ b/api/core/v1beta2/machine_types.go
@@ -570,7 +570,7 @@ type MachineStatus struct {
 
 	// phase represents the current phase of machine actuation.
 	// +optional
-	// +kubebuilder:validation:Enum=Pending;Provisioning;Provisioned;Running;Deleting;Deleted;Failed;Unknown
+	// +kubebuilder:validation:Enum=Pending;Provisioning;Provisioned;Running;Updating;Deleting;Deleted;Failed;Unknown
 	Phase string `json:"phase,omitempty"`
 
 	// certificatesExpiryDate is the expiry date of the machine certificates.
@@ -728,6 +728,7 @@ func (m *MachineStatus) GetTypedPhase() MachinePhase {
 		MachinePhaseProvisioning,
 		MachinePhaseProvisioned,
 		MachinePhaseRunning,
+		MachinePhaseUpdating,
 		MachinePhaseDeleting,
 		MachinePhaseDeleted,
 		MachinePhaseFailed:
diff --git a/config/crd/bases/cluster.x-k8s.io_machines.yaml b/config/crd/bases/cluster.x-k8s.io_machines.yaml
index 12f910515b07..bc29bfcfe003 100644
--- a/config/crd/bases/cluster.x-k8s.io_machines.yaml
+++ b/config/crd/bases/cluster.x-k8s.io_machines.yaml
@@ -2122,6 +2122,7 @@ spec:
                 - Provisioning
                 - Provisioned
                 - Running
+                - Updating
                 - Deleting
                 - Deleted
                 - Failed
diff --git a/internal/controllers/machine/machine_controller_status.go b/internal/controllers/machine/machine_controller_status.go
index c1e7a74dc837..8f570e084bcb 100644
--- a/internal/controllers/machine/machine_controller_status.go
+++ b/internal/controllers/machine/machine_controller_status.go
@@ -837,6 +837,10 @@ func setMachinePhaseAndLastUpdated(_ context.Context, m *clusterv1.Machine) {
 		m.Status.SetTypedPhase(clusterv1.MachinePhaseRunning)
 	}
 
+	if conditions.IsTrue(m, clusterv1.MachineUpdatingCondition) {
+		m.Status.SetTypedPhase(clusterv1.MachinePhaseUpdating)
+	}
+
 	// Set the phase to "deleting" if the deletion timestamp is set.
 	if !m.DeletionTimestamp.IsZero() {
 		m.Status.SetTypedPhase(clusterv1.MachinePhaseDeleting)
diff --git a/internal/controllers/machine/machine_controller_status_test.go b/internal/controllers/machine/machine_controller_status_test.go
index 33604d58c344..7398bc6b6827 100644
--- a/internal/controllers/machine/machine_controller_status_test.go
+++ b/internal/controllers/machine/machine_controller_status_test.go
@@ -26,12 +26,16 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	utilfeature "k8s.io/component-base/featuregate/testing"
 	"k8s.io/utils/ptr"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
+	runtimev1 "sigs.k8s.io/cluster-api/api/runtime/v1beta2"
 	"sigs.k8s.io/cluster-api/controllers/clustercache"
+	"sigs.k8s.io/cluster-api/feature"
 	"sigs.k8s.io/cluster-api/util"
+	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/conditions"
 	v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1"
 	"sigs.k8s.io/cluster-api/util/kubeconfig"
@@ -2444,6 +2448,104 @@ func TestReconcileMachinePhases(t *testing.T) {
 		}, 10*time.Second).Should(BeTrue())
 	})
 
+	t.Run("Should set `Updating` when Machine is in-place updating", func(t *testing.T) {
+		utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.InPlaceUpdates, true)
+
+		g := NewWithT(t)
+
+		ns, err := env.CreateNamespace(ctx, "test-reconcile-machine-phases")
+		g.Expect(err).ToNot(HaveOccurred())
+		defer func() {
+			g.Expect(env.Cleanup(ctx, ns)).To(Succeed())
+		}()
+
+		nodeProviderID := fmt.Sprintf("test://%s", util.RandomString(6))
+
+		cluster := defaultCluster.DeepCopy()
+		cluster.Namespace = ns.Name
+
+		bootstrapConfig := defaultBootstrap.DeepCopy()
+		bootstrapConfig.SetNamespace(ns.Name)
+		infraMachine := defaultInfra.DeepCopy()
+		infraMachine.SetNamespace(ns.Name)
+		g.Expect(unstructured.SetNestedField(infraMachine.Object, nodeProviderID, "spec", "providerID")).To(Succeed())
+		machine := defaultMachine.DeepCopy()
+		machine.Namespace = ns.Name
+
+		// Create Node.
+		node := &corev1.Node{
+			ObjectMeta: metav1.ObjectMeta{
+				GenerateName: "machine-test-node-",
+			},
+			Spec: corev1.NodeSpec{ProviderID: nodeProviderID},
+		}
+		g.Expect(env.Create(ctx, node)).To(Succeed())
+		defer func() {
+			g.Expect(env.Cleanup(ctx, node)).To(Succeed())
+		}()
+
+		g.Expect(env.Create(ctx, cluster)).To(Succeed())
+		defaultKubeconfigSecret = kubeconfig.GenerateSecret(cluster, kubeconfig.FromEnvTestConfig(env.Config, cluster))
+		g.Expect(env.Create(ctx, defaultKubeconfigSecret)).To(Succeed())
+		// Set InfrastructureReady to true so ClusterCache creates the clusterAccessor.
+		patch := client.MergeFrom(cluster.DeepCopy())
+		cluster.Status.Initialization.InfrastructureProvisioned = ptr.To(true)
+		g.Expect(env.Status().Patch(ctx, cluster, patch)).To(Succeed())
+
+		g.Expect(env.Create(ctx, bootstrapConfig)).To(Succeed())
+		g.Expect(env.Create(ctx, infraMachine)).To(Succeed())
+		// We have to subtract 2 seconds, because .status.lastUpdated does not contain milliseconds.
+		preUpdate := time.Now().Add(-2 * time.Second)
+		// Create and wait on machine to make sure caches sync and reconciliation triggers.
+		g.Expect(env.CreateAndWait(ctx, machine)).To(Succeed())
+
+		modifiedMachine := machine.DeepCopy()
+		// Set NodeRef on the copy we patch; the original machine is used as the patch base.
+		modifiedMachine.Status.NodeRef = clusterv1.MachineNodeReference{Name: node.Name}
+		g.Expect(env.Status().Patch(ctx, modifiedMachine, client.MergeFrom(machine))).To(Succeed())
+
+		// Set bootstrap ready.
+		modifiedBootstrapConfig := bootstrapConfig.DeepCopy()
+		g.Expect(unstructured.SetNestedField(modifiedBootstrapConfig.Object, true, "status", "initialization", "dataSecretCreated")).To(Succeed())
+		g.Expect(unstructured.SetNestedField(modifiedBootstrapConfig.Object, "secret-data", "status", "dataSecretName")).To(Succeed())
+		g.Expect(env.Status().Patch(ctx, modifiedBootstrapConfig, client.MergeFrom(bootstrapConfig))).To(Succeed())
+
+		// Set infra ready.
+		modifiedInfraMachine := infraMachine.DeepCopy()
+		g.Expect(unstructured.SetNestedField(modifiedInfraMachine.Object, true, "status", "initialization", "provisioned")).To(Succeed())
+		g.Expect(env.Status().Patch(ctx, modifiedInfraMachine, client.MergeFrom(infraMachine))).To(Succeed())
+
+		// Set annotations on Machine, BootstrapConfig and InfraMachine to trigger an in-place update.
+		orig := modifiedBootstrapConfig.DeepCopy()
+		annotations.AddAnnotations(modifiedBootstrapConfig, map[string]string{
+			clusterv1.UpdateInProgressAnnotation: "",
+		})
+		g.Expect(env.Patch(ctx, modifiedBootstrapConfig, client.MergeFrom(orig))).To(Succeed())
+		orig = modifiedInfraMachine.DeepCopy()
+		annotations.AddAnnotations(modifiedInfraMachine, map[string]string{
+			clusterv1.UpdateInProgressAnnotation: "",
+		})
+		g.Expect(env.Patch(ctx, modifiedInfraMachine, client.MergeFrom(orig))).To(Succeed())
+		origMachine := modifiedMachine.DeepCopy()
+		annotations.AddAnnotations(modifiedMachine, map[string]string{
+			runtimev1.PendingHooksAnnotation:     "UpdateMachine",
+			clusterv1.UpdateInProgressAnnotation: "",
+		})
+		g.Expect(env.Patch(ctx, modifiedMachine, client.MergeFrom(origMachine))).To(Succeed())
+
+		// Wait until Machine was reconciled.
+		g.Eventually(func(g Gomega) bool {
+			if err := env.DirectAPIServerGet(ctx, client.ObjectKeyFromObject(machine), machine); err != nil {
+				return false
+			}
+			g.Expect(machine.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePhaseUpdating))
+			// Verify that the LastUpdated timestamp was updated
+			g.Expect(machine.Status.LastUpdated.IsZero()).To(BeFalse())
+			g.Expect(machine.Status.LastUpdated.After(preUpdate)).To(BeTrue())
+			return true
+		}, 10*time.Second).Should(BeTrue())
+	})
+
 	t.Run("Should set `Provisioned` when there is a ProviderID and there is no Node", func(t *testing.T) {
 		g := NewWithT(t)
 
diff --git a/internal/controllers/machine/suite_test.go b/internal/controllers/machine/suite_test.go
index 34aee85bdb4c..4a171919e4d3 100644
--- a/internal/controllers/machine/suite_test.go
+++ b/internal/controllers/machine/suite_test.go
@@ -39,6 +39,8 @@ import (
 	"sigs.k8s.io/cluster-api/api/core/v1beta2/index"
 	"sigs.k8s.io/cluster-api/controllers/clustercache"
 	"sigs.k8s.io/cluster-api/controllers/remote"
+	runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
+	fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake"
 	"sigs.k8s.io/cluster-api/internal/test/envtest"
 )
 
@@ -99,8 +101,10 @@ func TestMain(m *testing.M) {
 	clusterCache.(interface{ DisablePrivateKeyGeneration() }).DisablePrivateKeyGeneration()
 
 	if err := (&Reconciler{
-		Client:                      mgr.GetClient(),
-		APIReader:                   mgr.GetAPIReader(),
+		Client:    mgr.GetClient(),
+		APIReader: mgr.GetAPIReader(),
+		// Just adding a minimal RuntimeClient to avoid panics in tests.
+		RuntimeClient:               fakeruntimeclient.NewRuntimeClientBuilder().WithCatalog(runtimecatalog.New()).Build(),
 		ClusterCache:                clusterCache,
 		RemoteConditionsGracePeriod: 5 * time.Minute,
 		AdditionalSyncMachineLabels: nil,
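Side note for reviewers, not part of the diff: a minimal sketch of how the new phase surfaces to consumers of the v1beta2 API. `MachinePhaseUpdating` and `GetTypedPhase` come from the changes above; the package, helper name, and remediation scenario are hypothetical.

```go
package example

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
)

// shouldSkipRemediation is a hypothetical helper: it treats a Machine that is
// updating in place (or being deleted) as not eligible for remediation.
// Note that clients still reading v1beta1 will see "Running" instead of
// "Updating" because of the down-conversion in conversion.go above.
func shouldSkipRemediation(m *clusterv1.Machine) bool {
	switch m.Status.GetTypedPhase() {
	case clusterv1.MachinePhaseUpdating, clusterv1.MachinePhaseDeleting, clusterv1.MachinePhaseDeleted:
		return true
	default:
		return false
	}
}
```

Because the `Updating` check in `setMachinePhaseAndLastUpdated` runs after the `Running` check, `Updating` takes precedence over `Running` while `MachineUpdatingCondition` is true, and `Deleting` still wins once the deletion timestamp is set.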