Enable Windows IPAM when creating a Windows nodegroup, deprecate install-vpc-controllers#4338
Conversation
016e8cc to
384a7ae
Compare
| minSize: 2 | ||
| maxSize: 3 | ||
|
|
||
| managedNodeGroups: |
| func (w *IPAM) Enable(ctx context.Context) error { | ||
| configMaps := w.Clientset.CoreV1().ConfigMaps(metav1.NamespaceSystem) | ||
| vpcCNIConfig, err := configMaps.Get(ctx, vpcCNIName, metav1.GetOptions{}) | ||
| if err != nil { | ||
| if !apierrors.IsNotFound(err) { | ||
| return errors.Wrap(err, "error getting resource") | ||
| } | ||
| return createConfigMap(ctx, configMaps) | ||
| } | ||
|
|
||
| if val, ok := vpcCNIConfig.Data[windowsIPAMField]; ok && val == "true" { | ||
| logger.Info("Windows IPAM is already enabled") | ||
| return nil | ||
| } | ||
|
|
||
| patch, err := createPatch(vpcCNIConfig) | ||
| if err != nil { | ||
| return errors.Wrap(err, "error creating merge patch") | ||
| } | ||
|
|
||
| _, err = configMaps.Patch(ctx, vpcCNIName, types.StrategicMergePatchType, patch, metav1.PatchOptions{}) | ||
| if err != nil { | ||
| return errors.Wrap(err, "failed to patch resource") | ||
| } | ||
| return nil | ||
| } | ||
|
|
||
| func createPatch(cm *corev1.ConfigMap) ([]byte, error) { | ||
| oldData, err := json.Marshal(cm) | ||
| if err != nil { | ||
| return nil, err | ||
| } | ||
| cm.Data[windowsIPAMField] = "true" | ||
| modifiedData, err := json.Marshal(cm) | ||
| if err != nil { | ||
| return nil, err | ||
| } | ||
| return jsonpatch.CreateMergePatch(oldData, modifiedData) | ||
| } | ||
|
|
||
| func createConfigMap(ctx context.Context, configMaps corev1client.ConfigMapInterface) error { | ||
| cm := &corev1.ConfigMap{ | ||
| TypeMeta: metav1.TypeMeta{ | ||
| Kind: "ConfigMap", | ||
| APIVersion: "v1", | ||
| }, | ||
| ObjectMeta: metav1.ObjectMeta{ | ||
| Name: vpcCNIName, | ||
| Namespace: vpcCNINamespace, | ||
| }, | ||
| Data: map[string]string{ | ||
| windowsIPAMField: "true", | ||
| }, | ||
| } | ||
| _, err := configMaps.Create(ctx, cm, metav1.CreateOptions{}) | ||
| return err | ||
| } |
There was a problem hiding this comment.
I was about to say the same.
There was a problem hiding this comment.
I have added a test suite.
pkg/windows/ipam.go
Outdated
| Clientset kubernetes.Interface | ||
| } | ||
|
|
||
| // Enable enables Windows IPAM |
There was a problem hiding this comment.
| // Enable enables Windows IPAM | |
| // Enable enables Windows IPAM in the VPC CNI configmap |
pkg/windows/ipam.go
Outdated
| vpcCNIConfig, err := configMaps.Get(ctx, vpcCNIName, metav1.GetOptions{}) | ||
| if err != nil { | ||
| if !apierrors.IsNotFound(err) { | ||
| return errors.Wrap(err, "error getting resource") |
There was a problem hiding this comment.
| return errors.Wrap(err, "error getting resource") | |
| return errors.Wrapf(err, "error getting %q configmap", vpcCNIName) |
There was a problem hiding this comment.
There are more fmt.Errorf results than errors.Wrapf. Which one do we use under what circumstance? It would be nice to be uniform. :)
There was a problem hiding this comment.
agreed, I prefer fmt.Errorf as it has %w for wrapping errors
There was a problem hiding this comment.
Most calls to fmt.Errorf are used without the %w verb, so they are equivalent to errors.Errorf. For consistency, I prefer using errors.Wrapf.
pkg/windows/ipam.go
Outdated
|
|
||
| _, err = configMaps.Patch(ctx, vpcCNIName, types.StrategicMergePatchType, patch, metav1.PatchOptions{}) | ||
| if err != nil { | ||
| return errors.Wrap(err, "failed to patch resource") |
There was a problem hiding this comment.
| return errors.Wrap(err, "failed to patch resource") | |
| return errors.Wrapf(err, "failed to patch %q configmap", vpcCNIName) |
| } | ||
|
|
||
| // Enable enables Windows IPAM | ||
| func (w *IPAM) Enable(ctx context.Context) error { |
There was a problem hiding this comment.
what happens if someone enables this functionality while also having the legacy windows VPC controller installed? Do we need to warn them?
There was a problem hiding this comment.
We are not providing a dedicated command to enable this functionality, but rather enabling IPAM behind the scenes when a Windows nodegroup is created as part of create cluster or create nodegroup. We log a warning in create cluster and utils install-vpc-controllers.
There was a problem hiding this comment.
what about existing clusters with windows nodegroups?
There was a problem hiding this comment.
We don't query the cluster for existing Windows nodegroups, but an improvement would be to log the same warning if the cluster has Windows nodegroups and VPC controller installed.
| "context" | ||
| "encoding/json" | ||
|
|
||
| "github.com/kris-nova/logger" | ||
|
|
||
| "k8s.io/apimachinery/pkg/types" | ||
|
|
||
| jsonpatch "github.com/evanphx/json-patch/v5" | ||
|
|
||
| corev1client "k8s.io/client-go/kubernetes/typed/core/v1" | ||
|
|
||
| corev1 "k8s.io/api/core/v1" | ||
|
|
||
| apierrors "k8s.io/apimachinery/pkg/api/errors" | ||
| metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||
|
|
||
| "github.com/pkg/errors" | ||
|
|
||
| "k8s.io/client-go/kubernetes" |
There was a problem hiding this comment.
This is a nit, but would you mind ordering these imports please?
There was a problem hiding this comment.
| "context" | |
| "encoding/json" | |
| "github.com/kris-nova/logger" | |
| "k8s.io/apimachinery/pkg/types" | |
| jsonpatch "github.com/evanphx/json-patch/v5" | |
| corev1client "k8s.io/client-go/kubernetes/typed/core/v1" | |
| corev1 "k8s.io/api/core/v1" | |
| apierrors "k8s.io/apimachinery/pkg/api/errors" | |
| metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | |
| "github.com/pkg/errors" | |
| "k8s.io/client-go/kubernetes" | |
| import ( | |
| "context" | |
| "encoding/json" | |
| jsonpatch "github.com/evanphx/json-patch/v5" | |
| "github.com/kris-nova/logger" | |
| "github.com/pkg/errors" | |
| corev1 "k8s.io/api/core/v1" | |
| apierrors "k8s.io/apimachinery/pkg/api/errors" | |
| metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | |
| "k8s.io/apimachinery/pkg/types" | |
| "k8s.io/client-go/kubernetes" | |
| corev1client "k8s.io/client-go/kubernetes/typed/core/v1" | |
| ) |
There was a problem hiding this comment.
Sounds good. I usually rely on goimports to do the sorting.
There was a problem hiding this comment.
Huh, I do the same. This the result of it. 🤔 I don't think I'm setting any funky settings.
| func createPatch(cm *corev1.ConfigMap) ([]byte, error) { | ||
| oldData, err := json.Marshal(cm) | ||
| if err != nil { | ||
| return nil, err | ||
| } | ||
| cm.Data[windowsIPAMField] = "true" | ||
| modifiedData, err := json.Marshal(cm) | ||
| if err != nil { | ||
| return nil, err | ||
| } | ||
| return jsonpatch.CreateMergePatch(oldData, modifiedData) | ||
| } |
There was a problem hiding this comment.
I'm not following this logic. You already have the original object. You don't need jsonpatch here.
Just use this:
newCM := cm.DeepCopy()
newCM.Data[windowsIPAMField] = "true"
data, err := json.Marshal(newCM)
if err != nil {
return nil, err
}
if _, err := configMaps.Patch(ctx, vpcCNIName, types.StrategicMergePatchType, data, metav1.PatchOptions{}); err != nil {
return errors.Wrapf(err, "failed to patch %q configmap", vpcCNIName)
}

And done. Unless I'm mis-reading something :)
There was a problem hiding this comment.
We do need a JSON patch here to avoid overwriting any changes made by other clients between get configmap and patch configmap.
There was a problem hiding this comment.
Oh wait. But then shouldn't you be defining a JSONPatchType instead of strategic merge?
There was a problem hiding this comment.
The StrategicMergePatchType constant is actually the application/strategic-merge-patch+json media type, which also handles JSON patch.
There was a problem hiding this comment.
ah, okay. Cool then :) You win. :D
|
|
||
| From version 1.14, Amazon EKS supports [Windows Nodes][eks-user-guide] that allow running Windows containers. | ||
| In addition to having Windows nodes, a Linux node in the cluster is required to run the VPC resource controller and CoreDNS, as Microsoft doesn't support host-networking mode yet. Thus, a Windows EKS cluster will be a mixed-mode cluster containing Windows nodes and at least one Linux node. | ||
| In addition to having Windows nodes, a Linux node in the cluster is required to run CoreDNS, as Microsoft doesn't support host-networking mode yet. Thus, a Windows EKS cluster will be a mixed-mode cluster containing Windows nodes and at least one Linux node. |
There was a problem hiding this comment.
| In addition to having Windows nodes, a Linux node in the cluster is required to run CoreDNS, as Microsoft doesn't support host-networking mode yet. Thus, a Windows EKS cluster will be a mixed-mode cluster containing Windows nodes and at least one Linux node. | |
| In addition to having Windows nodes, a Linux node in the cluster is required to run CoreDNS, as Microsoft doesn't support host-networking mode yet. Thus, a Windows EKS cluster will be a mixed-node cluster containing Windows nodes and at least one Linux node. |
I think that's supposed to be mixed-node, right? I can see that it was mixed-mode previously as well, but I think that's a typo.
There was a problem hiding this comment.
I'm unsure because mixed-node also seems unclear to me. WDYT about rephrasing it to "Thus, a Windows EKS cluster will be a mixture of Windows nodes and at least one Linux node"?
There was a problem hiding this comment.
Yep, that sounds a lot better. Thanks! :)
| You no longer need to install the VPC resource controller on Linux worker nodes to run Windows workloads in EKS clusters. | ||
| You can enable Windows IP address management on the EKS control plane via a ConfigMap setting (see https://todo.com for details). | ||
| eksctl will automatically patch the ConfigMap to enable Windows IP address management when a Windows nodegroup is created. | ||
| For existing clusters, you can enable it manually and run `eksctl utils install-vpc-controllers` with the `--delete` flag |
There was a problem hiding this comment.
| For existing clusters, you can enable it manually and run `eksctl utils install-vpc-controllers` with the `--delete` flag | |
| For existing clusters, you can enable it manually by running `eksctl utils install-vpc-controllers` with the `--delete` flag |
There was a problem hiding this comment.
eksctl utils install-vpc-controllers --delete does not enable Windows IPAM. In this part, we are asking users to enable IPAM manually by following the linked documentation and then delete VPC controller which is no longer required.
There was a problem hiding this comment.
Ah, so, enabling it manually is a step they have to take? Can you separate the two sentences then please? Because reading that made me think I'm enabling it by running that command. :)
There was a problem hiding this comment.
Just put a , after manually I guess.
There was a problem hiding this comment.
Ah, so, enabling it manually is a step they have to take?
Correct.
Can you separate the two sentences then please?
Sure, makes sense.
2357367 to
a6da10a
Compare
Description
This changelist enables Windows IPAM when creating a new nodegroup in
`create cluster` and `create nodegroup`, and adds a `--delete` flag to `eksctl utils install-vpc-controllers` to allow deleting an existing installation of VPC controller from worker nodes. Closes #2401
Logs from testing:
Checklist
README.md, or the `userdocs` directory); added labels for area (e.g. `area/nodegroup`) and kind (e.g. `kind/improvement`). BONUS POINTS checklist: complete for good vibes and maybe prizes?! 🤯