diff --git a/controlplane/kubeadm/internal/control_plane.go b/controlplane/kubeadm/internal/control_plane.go index 924efa474790..af76f23da4e7 100644 --- a/controlplane/kubeadm/internal/control_plane.go +++ b/controlplane/kubeadm/internal/control_plane.go @@ -37,6 +37,7 @@ import ( "sigs.k8s.io/cluster-api/controllers/external" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd" "sigs.k8s.io/cluster-api/internal/hooks" + "sigs.k8s.io/cluster-api/internal/util/inplace" "sigs.k8s.io/cluster-api/util/collections" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/failuredomains" @@ -199,12 +200,7 @@ func (c *ControlPlane) MachinesToCompleteTriggerInPlaceUpdate() collections.Mach // MachinesToCompleteInPlaceUpdate returns Machines that still have to complete their in-place update. func (c *ControlPlane) MachinesToCompleteInPlaceUpdate() collections.Machines { - return c.Machines.Filter(func(machine *clusterv1.Machine) bool { - // Note: Checking both annotations here to make this slightly more robust. - // Theoretically only checking for IsPending would have been enough. - _, ok := machine.Annotations[clusterv1.UpdateInProgressAnnotation] - return ok || hooks.IsPending(runtimehooksv1.UpdateMachine, machine) - }) + return c.Machines.Filter(inplace.IsUpdateInProgress) } // FailureDomainWithMostMachines returns the fd with most machines in it and at least one eligible machine in it. diff --git a/docs/book/src/tasks/experimental-features/runtime-sdk/implement-extensions.md b/docs/book/src/tasks/experimental-features/runtime-sdk/implement-extensions.md index f07abb9664b2..82c8024ddc96 100644 --- a/docs/book/src/tasks/experimental-features/runtime-sdk/implement-extensions.md +++ b/docs/book/src/tasks/experimental-features/runtime-sdk/implement-extensions.md @@ -304,7 +304,7 @@ created, the extension will detect the associated service and discover the assoc check the status of the ExtensionConfig. 
Below is an example of `ExtensionConfig` - ```yaml -apiVersion: runtime.cluster.x-k8s.io/v1alpha1 +apiVersion: runtime.cluster.x-k8s.io/v1beta2 kind: ExtensionConfig metadata: annotations: diff --git a/docs/proposals/20220221-runtime-SDK.md b/docs/proposals/20220221-runtime-SDK.md index 1e719bf99968..ac008d094472 100644 --- a/docs/proposals/20220221-runtime-SDK.md +++ b/docs/proposals/20220221-runtime-SDK.md @@ -296,7 +296,7 @@ behavior is introduced by this proposal: The Cluster administrator is required to register available Runtime Extension server using the following CR: ```yaml -apiVersion: runtime.cluster.x-k8s.io/v1alpha1 +apiVersion: runtime.cluster.x-k8s.io/v1beta2 kind: ExtensionConfig metadata: name: "my-amazing-extensions" @@ -326,7 +326,7 @@ The `namespaceSelector` will enable targeting of a subset of Clusters. ```yaml -apiVersion: runtime.cluster.x-k8s.io/v1alpha1 +apiVersion: runtime.cluster.x-k8s.io/v1beta2 kind: ExtensionConfig metadata: name: "my-amazing-extensions" @@ -367,8 +367,7 @@ dependencies among Runtime Extensions, being it modeled with something similar t [systemd unit options](https://www.freedesktop.org/software/systemd/man/systemd.unit.html) or alternative approaches. The main reason behind that is that such type of feature introduces complexity and creates "pet" like relations across -components making the overall system more fragile. This is also consistent with the [avoid dependencies](#avoid-dependencies) -recommendation above. +components making the overall system more fragile. This is also consistent with the avoid dependencies recommendation. 
## Runtime Hooks developer guide (CAPI internals) diff --git a/internal/controllers/machinedeployment/machinedeployment_rollout_rollingupdate.go b/internal/controllers/machinedeployment/machinedeployment_rollout_rollingupdate.go index c352307ff518..230f4b3e2603 100644 --- a/internal/controllers/machinedeployment/machinedeployment_rollout_rollingupdate.go +++ b/internal/controllers/machinedeployment/machinedeployment_rollout_rollingupdate.go @@ -415,7 +415,7 @@ func (p *rolloutPlanner) reconcileInPlaceUpdateIntent(ctx context.Context) error } // Check if the MachineSet can update in place; if not, move to the next MachineSet. - canUpdateMachineSetInPlaceFunc := func(_ *clusterv1.MachineSet) bool { return false } + canUpdateMachineSetInPlaceFunc := func(_ *clusterv1.MachineSet) bool { return true } if p.overrideCanUpdateMachineSetInPlace != nil { canUpdateMachineSetInPlaceFunc = p.overrideCanUpdateMachineSetInPlace } diff --git a/internal/controllers/machinedeployment/machinedeployment_rollout_rollingupdate_test.go b/internal/controllers/machinedeployment/machinedeployment_rollout_rollingupdate_test.go index 293f03b1a17f..a080d205f572 100644 --- a/internal/controllers/machinedeployment/machinedeployment_rollout_rollingupdate_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_rollout_rollingupdate_test.go @@ -1753,7 +1753,10 @@ func runRollingUpdateTestCase(ctx context.Context, t *testing.T, tt rollingUpdat // Running a small subset of MD reconcile (the rollout logic and a bit of setReplicas) p := newRolloutPlanner() - p.overrideCanUpdateMachineSetInPlace = tt.overrideCanUpdateMachineSetInPlaceFunc + p.overrideCanUpdateMachineSetInPlace = func(_ *clusterv1.MachineSet) bool { return false } + if tt.overrideCanUpdateMachineSetInPlaceFunc != nil { + p.overrideCanUpdateMachineSetInPlace = tt.overrideCanUpdateMachineSetInPlaceFunc + } p.overrideComputeDesiredMS = func(ctx context.Context, deployment *clusterv1.MachineDeployment, currentNewMS 
*clusterv1.MachineSet) (*clusterv1.MachineSet, error) { log := ctrl.LoggerFrom(ctx) desiredNewMS := currentNewMS diff --git a/test/e2e/cluster_in_place_update.go b/test/e2e/cluster_in_place_update.go index d19f37740766..0fb8d351a4b3 100644 --- a/test/e2e/cluster_in_place_update.go +++ b/test/e2e/cluster_in_place_update.go @@ -209,7 +209,13 @@ func ClusterInPlaceUpdateSpec(ctx context.Context, inputGetter func() ClusterInP // Doing multiple in-place updates for additional coverage. filePath := "/tmp/test" - for i, fileContent := range []string{"first in-place update", "second in-place update"} { + for i, fileContent := range []string{ + "first in-place update", + "second in-place update", + "third in-place update", + "fourth in-place update", + "fifth in-place update", + } { Byf("[%d] Trigger in-place update by modifying the files variable", i) originalCluster := cluster.DeepCopy() @@ -246,8 +252,7 @@ func ClusterInPlaceUpdateSpec(ctx context.Context, inputGetter func() ClusterInP // Ensure only in-place updates were executed and no Machine was re-created. machineObjectsAfterInPlaceUpdate = getMachineObjects(ctx, g, mgmtClient, cluster) g.Expect(machineNames(machineObjectsAfterInPlaceUpdate.ControlPlaneMachines)).To(Equal(machineNames(machineObjectsBeforeInPlaceUpdate.ControlPlaneMachines))) - // TODO(in-place): enable once MD/MS/Machine controller PRs are merged - // g.Expect(machineNames(machineObjectsAfterInPlaceUpdate.WorkerMachines)).To(Equal(machineNames(machineObjectsBeforeInPlaceUpdate.WorkerMachines))) + g.Expect(machineNames(machineObjectsAfterInPlaceUpdate.WorkerMachines)).To(Equal(machineNames(machineObjectsBeforeInPlaceUpdate.WorkerMachines))) }, input.E2EConfig.GetIntervals(specName, "wait-control-plane")...).Should(Succeed()) // Update machineObjectsBeforeInPlaceUpdate for the next round of in-place update.
diff --git a/test/extension/config/tilt/extensionconfig.yaml b/test/extension/config/tilt/extensionconfig.yaml index ceb27024b1d1..cd91a9a4706e 100644 --- a/test/extension/config/tilt/extensionconfig.yaml +++ b/test/extension/config/tilt/extensionconfig.yaml @@ -1,4 +1,4 @@ -apiVersion: runtime.cluster.x-k8s.io/v1alpha1 +apiVersion: runtime.cluster.x-k8s.io/v1beta2 kind: ExtensionConfig metadata: annotations: @@ -17,4 +17,4 @@ spec: - key: kubernetes.io/metadata.name operator: In values: - - default # Note: this assumes the test extension is used by Cluster in the default namespace only \ No newline at end of file + - default # Note: this assumes the test extension is used by Cluster in the default namespace only diff --git a/test/extension/handlers/inplaceupdate/handlers.go b/test/extension/handlers/inplaceupdate/handlers.go index b2a32617f293..2e4b51e36394 100644 --- a/test/extension/handlers/inplaceupdate/handlers.go +++ b/test/extension/handlers/inplaceupdate/handlers.go @@ -32,6 +32,7 @@ import ( "gomodules.xyz/jsonpatch/v2" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/rand" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -198,7 +199,7 @@ func (h *ExtensionHandlers) DoUpdateMachine(ctx context.Context, req *runtimehoo // Note: We are intentionally not actually applying any in-place changes we are just faking them, // which is good enough for test purposes. if firstTimeCalled, ok := h.state.Load(key); ok { - if time.Since(firstTimeCalled.(time.Time)) > 20*time.Second { + if time.Since(firstTimeCalled.(time.Time)) > time.Duration(20+rand.Intn(10))*time.Second { h.state.Delete(key) resp.Status = runtimehooksv1.ResponseStatusSuccess resp.Message = "In-place update is done"