Skip to content
Merged
4 changes: 2 additions & 2 deletions helm-chart/kuberay-operator/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -102,7 +102,7 @@ spec:
project: default
source:
repoURL: https://github.com/ray-project/kuberay
targetRevision: v1.0.0-rc.0
targetRevision: v1.6.0
path: helm-chart/kuberay-operator/crds
destination:
server: https://kubernetes.default.svc
Expand All @@ -122,7 +122,7 @@ metadata:
spec:
source:
repoURL: https://github.com/ray-project/kuberay
targetRevision: v1.0.0-rc.0
targetRevision: v1.6.0
path: helm-chart/kuberay-operator
helm:
skipCrds: true
Expand Down
4 changes: 2 additions & 2 deletions helm-chart/kuberay-operator/README.md.gotmpl
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ spec:
project: default
source:
repoURL: https://github.com/ray-project/kuberay
targetRevision: v1.0.0-rc.0
targetRevision: v1.6.0
path: helm-chart/kuberay-operator/crds
destination:
server: https://kubernetes.default.svc
Expand All @@ -124,7 +124,7 @@ metadata:
spec:
source:
repoURL: https://github.com/ray-project/kuberay
targetRevision: v1.0.0-rc.0
targetRevision: v1.6.0
path: helm-chart/kuberay-operator
helm:
skipCrds: true
Expand Down
2 changes: 1 addition & 1 deletion helm-chart/ray-cluster/Chart.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ name: ray-cluster

description: A Helm chart for deploying the RayCluster with the kuberay operator.

version: 1.1.0
version: 1.6.0

home: https://github.com/ray-project/kuberay

Expand Down
26 changes: 13 additions & 13 deletions helm-chart/ray-cluster/README.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# RayCluster

![Version: 1.1.0](https://img.shields.io/badge/Version-1.1.0-informational?style=flat-square)
![Version: 1.6.0](https://img.shields.io/badge/Version-1.6.0-informational?style=flat-square)

A Helm chart for deploying the RayCluster with the kuberay operator.

Expand Down Expand Up @@ -30,15 +30,15 @@ kind create cluster
# Step 2: Register a Helm chart repo
helm repo add kuberay https://ray-project.github.io/kuberay-helm/

# Step 3: Install both CRDs and KubeRay operator v1.1.0.
helm install kuberay-operator kuberay/kuberay-operator --version 1.1.0
# Step 3: Install both CRDs and KubeRay operator v1.6.0.
helm install kuberay-operator kuberay/kuberay-operator --version 1.6.0

# Step 4: Install a RayCluster custom resource
# (For x86_64 users)
helm install raycluster kuberay/ray-cluster --version 1.1.0
helm install raycluster kuberay/ray-cluster --version 1.6.0
# (For arm64 users, e.g. Mac M1)
# See here for all available arm64 images: https://hub.docker.com/r/rayproject/ray/tags?page=1&name=aarch64
helm install raycluster kuberay/ray-cluster --version 1.1.0 --set image.tag=nightly-aarch64
helm install raycluster kuberay/ray-cluster --version 1.6.0 --set image.tag=nightly-aarch64

# Step 5: Verify the installation of KubeRay operator and RayCluster
kubectl get pods
Expand Down Expand Up @@ -88,9 +88,9 @@ helm uninstall raycluster
| head.containerEnv | list | `[]` | |
| head.envFrom | list | `[]` | envFrom to pass to head pod |
| head.resources.limits.cpu | string | `"1"` | |
| head.resources.limits.memory | string | `"2G"` | |
| head.resources.limits.memory | string | `"5Gi"` | |
| head.resources.requests.cpu | string | `"1"` | |
| head.resources.requests.memory | string | `"2G"` | |
| head.resources.requests.memory | string | `"5Gi"` | |
| head.resourceClaims | list | `[]` | ResourceClaims to allocate with the head pod |
| head.annotations | object | `{}` | Extra annotations for head pod |
| head.nodeSelector | object | `{}` | Node labels for head pod assignment |
Expand All @@ -111,7 +111,7 @@ helm uninstall raycluster
| worker.groupName | string | `"workergroup"` | The name of the workergroup |
| worker.replicas | int | `1` | The number of replicas for the worker pod |
| worker.minReplicas | int | `1` | The minimum number of replicas for the worker pod |
| worker.maxReplicas | int | `3` | The maximum number of replicas for the worker pod |
| worker.maxReplicas | int | `5` | The maximum number of replicas for the worker pod |
| worker.labels | object | `{}` | Labels for the worker pod |
| worker.serviceAccountName | string | `""` | |
| worker.restartPolicy | string | `""` | |
Expand All @@ -120,9 +120,9 @@ helm uninstall raycluster
| worker.containerEnv | list | `[]` | |
| worker.envFrom | list | `[]` | envFrom to pass to worker pod |
| worker.resources.limits.cpu | string | `"1"` | |
| worker.resources.limits.memory | string | `"1G"` | |
| worker.resources.limits.memory | string | `"1Gi"` | |
| worker.resources.requests.cpu | string | `"1"` | |
| worker.resources.requests.memory | string | `"1G"` | |
| worker.resources.requests.memory | string | `"1Gi"` | |
| worker.resourceClaims | list | `[]` | ResourceClaims to allocate with the worker pod |
| worker.annotations | object | `{}` | Extra annotations for worker pod |
| worker.nodeSelector | object | `{}` | Node labels for worker pod assignment |
Expand All @@ -142,17 +142,17 @@ helm uninstall raycluster
| additionalWorkerGroups.smallGroup.disabled | bool | `true` | |
| additionalWorkerGroups.smallGroup.replicas | int | `0` | The number of replicas for the additional worker pod |
| additionalWorkerGroups.smallGroup.minReplicas | int | `0` | The minimum number of replicas for the additional worker pod |
| additionalWorkerGroups.smallGroup.maxReplicas | int | `3` | The maximum number of replicas for the additional worker pod |
| additionalWorkerGroups.smallGroup.maxReplicas | int | `5` | The maximum number of replicas for the additional worker pod |
| additionalWorkerGroups.smallGroup.labels | object | `{}` | Labels for the additional worker pod |
| additionalWorkerGroups.smallGroup.serviceAccountName | string | `""` | |
| additionalWorkerGroups.smallGroup.restartPolicy | string | `""` | |
| additionalWorkerGroups.smallGroup.runtimeClassName | string | `""` | runtimeClassName for this additional worker group. Empty string means default runtime. |
| additionalWorkerGroups.smallGroup.containerEnv | list | `[]` | |
| additionalWorkerGroups.smallGroup.envFrom | list | `[]` | envFrom to pass to additional worker pod |
| additionalWorkerGroups.smallGroup.resources.limits.cpu | int | `1` | |
| additionalWorkerGroups.smallGroup.resources.limits.memory | string | `"1G"` | |
| additionalWorkerGroups.smallGroup.resources.limits.memory | string | `"1Gi"` | |
| additionalWorkerGroups.smallGroup.resources.requests.cpu | int | `1` | |
| additionalWorkerGroups.smallGroup.resources.requests.memory | string | `"1G"` | |
| additionalWorkerGroups.smallGroup.resources.requests.memory | string | `"1Gi"` | |
| additionalWorkerGroups.smallGroup.resourceClaims | list | `[]` | ResourceClaims to allocate with the additional worker pod |
| additionalWorkerGroups.smallGroup.annotations | object | `{}` | Extra annotations for additional worker pod |
| additionalWorkerGroups.smallGroup.nodeSelector | object | `{}` | Node labels for additional worker pod assignment |
Expand Down
8 changes: 4 additions & 4 deletions helm-chart/ray-cluster/README.md.gotmpl
Original file line number Diff line number Diff line change
Expand Up @@ -32,15 +32,15 @@ kind create cluster
# Step 2: Register a Helm chart repo
helm repo add kuberay https://ray-project.github.io/kuberay-helm/

# Step 3: Install both CRDs and KubeRay operator v1.1.0.
helm install kuberay-operator kuberay/kuberay-operator --version 1.1.0
# Step 3: Install both CRDs and KubeRay operator v1.6.0.
helm install kuberay-operator kuberay/kuberay-operator --version 1.6.0
Comment thread
Future-Outlier marked this conversation as resolved.
Outdated

# Step 4: Install a RayCluster custom resource
# (For x86_64 users)
helm install raycluster kuberay/ray-cluster --version 1.1.0
helm install raycluster kuberay/ray-cluster --version 1.6.0
Comment thread
Future-Outlier marked this conversation as resolved.
Outdated
# (For arm64 users, e.g. Mac M1)
# See here for all available arm64 images: https://hub.docker.com/r/rayproject/ray/tags?page=1&name=aarch64
helm install raycluster kuberay/ray-cluster --version 1.1.0 --set image.tag=nightly-aarch64
helm install raycluster kuberay/ray-cluster --version 1.6.0 --set image.tag=nightly-aarch64

# Step 5: Verify the installation of KubeRay operator and RayCluster
kubectl get pods
Expand Down
16 changes: 8 additions & 8 deletions helm-chart/ray-cluster/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -128,10 +128,10 @@ head:
limits:
cpu: "1"
# To avoid out-of-memory issues, never allocate less than 2G memory for the Ray head (default is 5Gi).
memory: "2G"
memory: "5Gi"
requests:
cpu: "1"
memory: "2G"
memory: "5Gi"
Comment thread
Future-Outlier marked this conversation as resolved.
Comment thread
Future-Outlier marked this conversation as resolved.

# -- ResourceClaims to allocate with the head pod
resourceClaims: []
Expand Down Expand Up @@ -210,7 +210,7 @@ worker:
minReplicas: 1

# -- The maximum number of replicas for the worker pod
maxReplicas: 3
maxReplicas: 5

# -- Labels for the worker pod
labels: {}
Expand Down Expand Up @@ -249,10 +249,10 @@ worker:
resources:
limits:
cpu: "1"
memory: "1G"
memory: "1Gi"
Comment thread
cursor[bot] marked this conversation as resolved.
requests:
cpu: "1"
memory: "1G"
memory: "1Gi"
Comment thread
cursor[bot] marked this conversation as resolved.

# -- ResourceClaims to allocate with the worker pod
resourceClaims: []
Expand Down Expand Up @@ -324,7 +324,7 @@ additionalWorkerGroups:
minReplicas: 0

# -- The maximum number of replicas for the additional worker pod
maxReplicas: 3
maxReplicas: 5

# -- Labels for the additional worker pod
labels: {}
Expand Down Expand Up @@ -360,10 +360,10 @@ additionalWorkerGroups:
resources:
limits:
cpu: 1
memory: "1G"
memory: "1Gi"
requests:
cpu: 1
memory: "1G"
memory: "1Gi"

# -- ResourceClaims to allocate with the additional worker pod
resourceClaims: []
Expand Down
2 changes: 1 addition & 1 deletion ray-operator/config/samples/ray-cluster.sample.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ spec:
memory: "5Gi"
requests:
cpu: "1"
memory: "2Gi"
memory: "5Gi"
Comment thread
Future-Outlier marked this conversation as resolved.
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Btw, I think the e2e test output is failing since you increased the memory here:


[2026-03-16T15:43:22Z] Running Suite: Kubectl Ray e2e Test Suite - /workdir/kubectl-plugin/test/e2e
--
[2026-03-16T15:43:22Z] ============================================================================
[2026-03-16T15:43:22Z] Random Seed: 1773674078
[2026-03-16T15:43:22Z]
[2026-03-16T15:43:22Z] Will run 30 of 30 specs
[2026-03-16T15:43:22Z] ••••••••••••
[2026-03-16T15:43:22Z] ##############################
[2026-03-16T15:43:22Z] • [FAILED] [36.174 seconds]
[2026-03-16T15:43:22Z] Calling ray plugin `get` command [It] succeed in getting ray cluster information
[2026-03-16T15:43:22Z] /workdir/kubectl-plugin/test/e2e/kubectl_ray_cluster_get_test.go:28
[2026-03-16T15:43:22Z]
[2026-03-16T15:43:22Z]   [FAILED] Expected
[2026-03-16T15:43:22Z]       <string>: NAME                 NAMESPACE       DESIRED WORKERS   AVAILABLE WORKERS   CPUS   GPUS   TPUS   MEMORY   CONDITION               STATUS   AGE
[2026-03-16T15:43:22Z]       raycluster-kuberay   test-ns-usf5j   1                 1                   2      0      0      6Gi      RayClusterProvisioned   ready    25s
[2026-03-16T15:43:22Z]   to contain substring
[2026-03-16T15:43:22Z]       <string>: NAME                 NAMESPACE       DESIRED WORKERS   AVAILABLE WORKERS   CPUS   GPUS   TPUS   MEMORY   CONDITION               STATUS   AGE
[2026-03-16T15:43:22Z]       raycluster-kuberay   test-ns-usf5j   1                 1                   2      0      0      3Gi      RayClusterProvisioned   ready
[2026-03-16T15:43:22Z]   In [It] at: /workdir/kubectl-plugin/test/e2e/kubectl_ray_cluster_get_test.go:69 @ 03/16/26 15:28:44.638
[2026-03-16T15:43:22Z] ##############################
[2026-03-16T15:43:22Z] •••••••••••••••••
[2026-03-16T15:43:22Z]
[2026-03-16T15:43:22Z] Summarizing 1 Failure:
[2026-03-16T15:43:22Z]   [FAIL] Calling ray plugin `get` command [It] succeed in getting ray cluster information
[2026-03-16T15:43:22Z]   /workdir/kubectl-plugin/test/e2e/kubectl_ray_cluster_get_test.go:69
[2026-03-16T15:43:22Z]
[2026-03-16T15:43:22Z] Ran 30 of 30 Specs in 1724.141 seconds
[2026-03-16T15:43:22Z] FAIL! -- 29 Passed | 1 Failed | 0 Pending | 0 Skipped
[2026-03-16T15:43:22Z] ### FAIL: TestKubectlRayCommand (1724.14s)
[2026-03-16T15:43:22Z] FAIL
[2026-03-16T15:43:22Z] FAIL	github.com/ray-project/kuberay/kubectl-plugin/test/e2e	1724.148s
[2026-03-16T15:43:22Z] FAIL
[2026-03-16T15:43:24Z] 🚨 Error: The command exited with status 1
[2026-03-16T15:43:24Z] user command error: The plugin docker command hook exited with status 1


Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

fixed, tks!

ports:
- containerPort: 6379
name: gcs-server
Expand Down
Loading