From 9bb0cc208f909d1abcffa1654d0d30a576c66e81 Mon Sep 17 00:00:00 2001 From: Natalia Marukovich Date: Thu, 4 Dec 2025 16:30:30 +0100 Subject: [PATCH 01/21] K8SPXC-1683 add pmm and 8.4 to smart-update test --- e2e-tests/run-pr.csv | 19 +++- .../conf/operator.9.9.9.pxc-operator.dep.json | 32 ++++++- .../conf/operator.9.9.9.pxc-operator.json | 30 ++++-- .../conf/smart-update-haproxy.yml | 2 +- ...smart-update-version-service-reachable.yml | 2 +- ...art-update-version-service-unreachable.yml | 2 +- e2e-tests/smart-update1/conf/smart-update.yml | 2 +- e2e-tests/smart-update1/conf/vs.yml | 2 +- ...lemetry.version-service-cr-8.4-cw.log.json | 21 +++++ ..._telemetry.version-service-cr-8.4.log.json | 20 ++++ e2e-tests/smart-update2/compare/test1.yaml | 23 +++++ e2e-tests/smart-update2/compare/test2.yaml | 21 +++++ .../conf/operator.9.9.9.pxc-operator.dep.json | 30 +++++- .../conf/operator.9.9.9.pxc-operator.json | 30 ++++-- .../conf/smart-update-haproxy.yml | 2 +- .../smart-update2/conf/smart-update-pmm3.yaml | 80 ++++++++++++++++ ...smart-update-version-service-reachable.yml | 2 +- ...art-update-version-service-unreachable.yml | 2 +- e2e-tests/smart-update2/conf/smart-update.yml | 2 +- e2e-tests/smart-update2/conf/vs.yml | 2 +- e2e-tests/smart-update2/run | 92 ++++++++++++++++++- 21 files changed, 384 insertions(+), 34 deletions(-) create mode 100644 e2e-tests/smart-update2/compare/disabled_telemetry.version-service-cr-8.4-cw.log.json create mode 100644 e2e-tests/smart-update2/compare/disabled_telemetry.version-service-cr-8.4.log.json create mode 100644 e2e-tests/smart-update2/compare/test1.yaml create mode 100644 e2e-tests/smart-update2/compare/test2.yaml create mode 100644 e2e-tests/smart-update2/conf/smart-update-pmm3.yaml diff --git a/e2e-tests/run-pr.csv b/e2e-tests/run-pr.csv index 59d3d08a83..ad2febde2a 100644 --- a/e2e-tests/run-pr.csv +++ b/e2e-tests/run-pr.csv @@ -1,25 +1,37 @@ auto-tuning,8.0 allocator,8.0 +allocator,8.4 backup-storage-tls,8.0 
+backup-storage-tls,8.4 cross-site,8.0 custom-users,8.0 demand-backup-cloud,8.0 +demand-backup-cloud,8.4 demand-backup-encrypted-with-tls,8.0 +demand-backup-encrypted-with-tls,8.4 demand-backup,8.0 +demand-backup,8.4 demand-backup-flow-control,8.0 +demand-backup-flow-control,8.4 demand-backup-parallel,8.0 +demand-backup-parallel,8.4 demand-backup-without-passwords,8.0 +demand-backup-without-passwords,8.4 haproxy,5.7 haproxy,8.0 +haproxy,8.4 init-deploy,5.7 init-deploy,8.0 limits,8.0 monitoring-2-0,8.0 monitoring-pmm3,8.0 +monitoring-pmm3,8.4 one-pod,5.7 one-pod,8.0 pitr,8.0 +pitr,8.4 pitr-gap-errors,8.0 +pitr-gap-errors,8.4 proxy-protocol,8.0 proxy-switch,8.0 proxysql-sidecar-res-limits,8.0 @@ -28,22 +40,27 @@ pvc-resize,5.7 pvc-resize,8.0 recreate,8.0 restore-to-encrypted-cluster,8.0 +restore-to-encrypted-cluster,8.4 scaling-proxysql,8.0 scaling,8.0 scheduled-backup,5.7 scheduled-backup,8.0 +scheduled-backup,8.4 security-context,8.0 smart-update1,8.0 smart-update2,8.0 +smart-update1,8.4 +smart-update2,8.4 storage,8.0 tls-issue-cert-manager-ref,8.0 tls-issue-cert-manager,8.0 tls-issue-self,8.0 upgrade-consistency,8.0 +upgrade-consistency,8.4 upgrade-haproxy,5.7 upgrade-haproxy,8.0 upgrade-proxysql,5.7 upgrade-proxysql,8.0 users,5.7 users,8.0 -validation-hook,8.0 +validation-hook,8.0 \ No newline at end of file diff --git a/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.dep.json b/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.dep.json index fd2d63de4f..93d471424f 100644 --- a/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.dep.json +++ b/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.dep.json @@ -1,11 +1,35 @@ { "backup": { - "8.0.11": { - ">=": [ + "8.4.0": { + "and": [ + { + ">=": [ + { + "var": "productVersion" + }, + "8.4" + ] + } + ] + }, + "8.0.14": { + "and": [ { - "var": "productVersion" + ">=": [ + { + "var": "productVersion" + }, + "8.0" + ] }, - "8.0" + { + "<": [ + { + "var": "productVersion" + }, + "8.4" + ] + } ] 
}, "2.4.20": { diff --git a/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.json b/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.json index a9d4ea0c69..34d3b61be4 100644 --- a/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.json +++ b/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.json @@ -5,6 +5,18 @@ "product": "pxc-operator", "matrix": { "pxc": { + "8.4.6-6.1": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.4", + "image_hash": "sha256:bee70295d85ade5044ff1adde70c5047fcc88109a9fbe109befe863372a60a1c", + "status": "available", + "critical": false + }, + "8.4.5-5.1": { + "image_path": "percona/percona-xtradb-cluster:8.4.5-5.1", + "image_hash": "918c54c11c96bf61bb3f32315ef6b344b7b1d68a0457a47a3804eca3932b2b17", + "status": "available", + "critical": false + }, "8.0.20-11.2": { "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.0", "image_hash": "feda5612db18da824e971891d6084465aa9cdc9918c18001cd95ba30916da78b", @@ -67,15 +79,15 @@ } }, "pmm": { - "2.0.0": { - "image_path": "perconalab/percona-xtradb-cluster-operator:main-pmm", - "image_hash": "28bbb6693689a15c407c85053755334cd25d864e632ef7fed890bc85726cfb68", + "2.44.1-1": { + "image_path": "percona/pmm-client:2.44.1-1", + "image_hash": "52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3", "status": "recommended", "critical": false }, - "1.17.1": { - "image_path": "percona/percona-xtradb-cluster-operator:1.6.0-pmm", - "image_hash": "28bbb6693689a15c407c85053755334cd25d864e632ef7fed890bc85726cfb68", + "3.4.1": { + "image_path": "percona/pmm-client:3.4.1", + "image_hash": "1c59d7188f8404e0294f4bfb3d2c3600107f808a023668a170a6b8036c56619b", "status": "recommended", "critical": false } @@ -97,6 +109,12 @@ } }, "backup": { + "8.4.0-4.1": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.4-backup", + "image_hash": "sha256:40a22aa9f83d08c4a79db4a947cdab2e316d7e03535ae8874c6e6ec7bfd11938", + "status": 
"available", + "critical": false + }, "8.0.14": { "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup", "image_hash": "3d57e1174bac5c1c10b253437205682445c1f72c9b2b462bc8375e211c0265b5", diff --git a/e2e-tests/smart-update1/conf/smart-update-haproxy.yml b/e2e-tests/smart-update1/conf/smart-update-haproxy.yml index 5d36197559..fe77639a17 100644 --- a/e2e-tests/smart-update1/conf/smart-update-haproxy.yml +++ b/e2e-tests/smart-update1/conf/smart-update-haproxy.yml @@ -63,7 +63,7 @@ spec: gracePeriod: 30 pmm: enabled: false - image: perconalab/pmm-client:1.17.1 + image: percona/pmm-client:2.44.1-1 serverHost: monitoring-service serverUser: pmm backup: diff --git a/e2e-tests/smart-update1/conf/smart-update-version-service-reachable.yml b/e2e-tests/smart-update1/conf/smart-update-version-service-reachable.yml index 5d36197559..fe77639a17 100644 --- a/e2e-tests/smart-update1/conf/smart-update-version-service-reachable.yml +++ b/e2e-tests/smart-update1/conf/smart-update-version-service-reachable.yml @@ -63,7 +63,7 @@ spec: gracePeriod: 30 pmm: enabled: false - image: perconalab/pmm-client:1.17.1 + image: percona/pmm-client:2.44.1-1 serverHost: monitoring-service serverUser: pmm backup: diff --git a/e2e-tests/smart-update1/conf/smart-update-version-service-unreachable.yml b/e2e-tests/smart-update1/conf/smart-update-version-service-unreachable.yml index f26dfca4c8..c33a6f08dc 100644 --- a/e2e-tests/smart-update1/conf/smart-update-version-service-unreachable.yml +++ b/e2e-tests/smart-update1/conf/smart-update-version-service-unreachable.yml @@ -63,7 +63,7 @@ spec: gracePeriod: 30 pmm: enabled: false - image: perconalab/pmm-client:1.17.1 + image: percona/pmm-client:2.44.1-1 serverHost: monitoring-service serverUser: pmm backup: diff --git a/e2e-tests/smart-update1/conf/smart-update.yml b/e2e-tests/smart-update1/conf/smart-update.yml index fa58e6e869..fe8273630d 100644 --- a/e2e-tests/smart-update1/conf/smart-update.yml +++ 
b/e2e-tests/smart-update1/conf/smart-update.yml @@ -50,7 +50,7 @@ spec: antiAffinityTopologyKey: "kubernetes.io/hostname" pmm: enabled: false - image: perconalab/pmm-client:1.17.1 + image: percona/pmm-client:2.44.1-1 serverHost: monitoring-service serverUser: pmm backup: diff --git a/e2e-tests/smart-update1/conf/vs.yml b/e2e-tests/smart-update1/conf/vs.yml index f1513b3fac..21210cd158 100644 --- a/e2e-tests/smart-update1/conf/vs.yml +++ b/e2e-tests/smart-update1/conf/vs.yml @@ -18,7 +18,7 @@ spec: - env: - name: SERVE_HTTP value: "true" - image: perconalab/version-service:main-e378a19 + image: perconalab/version-service:main-latest imagePullPolicy: IfNotPresent name: version-service ports: diff --git a/e2e-tests/smart-update2/compare/disabled_telemetry.version-service-cr-8.4-cw.log.json b/e2e-tests/smart-update2/compare/disabled_telemetry.version-service-cr-8.4-cw.log.json new file mode 100644 index 0000000000..18fffb1a82 --- /dev/null +++ b/e2e-tests/smart-update2/compare/disabled_telemetry.version-service-cr-8.4-cw.log.json @@ -0,0 +1,21 @@ +{ + "system": "grpc", + "span.kind": "server", + "grpc.service": "version.VersionService", + "grpc.method": "Apply", + "grpc.request.content": { + "msg": { + "product": "pxc-operator", + "operatorVersion": "9.9.9", + "apply": "8.4-latest", + "clusterWideEnabled": true + } + } +} +{ + "system": "grpc", + "span.kind": "server", + "grpc.service": "version.VersionService", + "grpc.method": "Apply", + "grpc.code": "OK" +} diff --git a/e2e-tests/smart-update2/compare/disabled_telemetry.version-service-cr-8.4.log.json b/e2e-tests/smart-update2/compare/disabled_telemetry.version-service-cr-8.4.log.json new file mode 100644 index 0000000000..ebb9af6ed5 --- /dev/null +++ b/e2e-tests/smart-update2/compare/disabled_telemetry.version-service-cr-8.4.log.json @@ -0,0 +1,20 @@ +{ + "system": "grpc", + "span.kind": "server", + "grpc.service": "version.VersionService", + "grpc.method": "Apply", + "grpc.request.content": { + "msg": { + 
"product": "pxc-operator", + "operatorVersion": "9.9.9", + "apply": "8.4-latest", + } + } +} +{ + "system": "grpc", + "span.kind": "server", + "grpc.service": "version.VersionService", + "grpc.method": "Apply", + "grpc.code": "OK" +} diff --git a/e2e-tests/smart-update2/compare/test1.yaml b/e2e-tests/smart-update2/compare/test1.yaml new file mode 100644 index 0000000000..798e28262a --- /dev/null +++ b/e2e-tests/smart-update2/compare/test1.yaml @@ -0,0 +1,23 @@ + cat /var/folders/ww/_6wsbxjd5ysbpmgksfyrp2640000gn/T/tmp.k1D2dKMLy9/disabled_telemetry.version-service-cr.log.json + { + "system": "grpc", + "span.kind": "server", + "grpc.service": "version.VersionService", + "grpc.method": "Apply", + "grcp.start_time": "2025-12-03T10:55:47Z", + "grpc.request.content": { + "msg": { + "product": "pxc-operator", + "operatorVersion": "9.9.9", + "apply": "8.4-latest", + "platform": "kubernetes" + } + } + } + { + "system": "grpc", + "span.kind": "server", + "grpc.service": "version.VersionService", + "grpc.method": "Apply", + "grpc.code": "OK" + } \ No newline at end of file diff --git a/e2e-tests/smart-update2/compare/test2.yaml b/e2e-tests/smart-update2/compare/test2.yaml new file mode 100644 index 0000000000..fb3365a556 --- /dev/null +++ b/e2e-tests/smart-update2/compare/test2.yaml @@ -0,0 +1,21 @@ + + cat /Users/marukovich/github-projects/percona/percona-xtradb-cluster-operator/e2e-tests/smart-update2/compare/disabled_telemetry.version-service-cr-8.4.log.json + { + "system": "grpc", + "span.kind": "server", + "grpc.service": "version.VersionService", + "grpc.method": "Apply", + "grpc.request.content": { + "msg": { + "product": "pxc-operator", + "operatorVersion": "9.9.9", + "apply": "8.4-latest", + } + } + } + { + "system": "grpc", + "span.kind": "server", + "grpc.service": "version.VersionService", + "grpc.method": "Apply", + "grpc.code": "OK" + } \ No newline at end of file diff --git a/e2e-tests/smart-update2/conf/operator.9.9.9.pxc-operator.dep.json 
b/e2e-tests/smart-update2/conf/operator.9.9.9.pxc-operator.dep.json index efefe44722..93d471424f 100644 --- a/e2e-tests/smart-update2/conf/operator.9.9.9.pxc-operator.dep.json +++ b/e2e-tests/smart-update2/conf/operator.9.9.9.pxc-operator.dep.json @@ -1,11 +1,35 @@ { "backup": { + "8.4.0": { + "and": [ + { + ">=": [ + { + "var": "productVersion" + }, + "8.4" + ] + } + ] + }, "8.0.14": { - ">=": [ + "and": [ { - "var": "productVersion" + ">=": [ + { + "var": "productVersion" + }, + "8.0" + ] }, - "8.0" + { + "<": [ + { + "var": "productVersion" + }, + "8.4" + ] + } ] }, "2.4.20": { diff --git a/e2e-tests/smart-update2/conf/operator.9.9.9.pxc-operator.json b/e2e-tests/smart-update2/conf/operator.9.9.9.pxc-operator.json index 0ff8ff14d8..f93797cf0b 100644 --- a/e2e-tests/smart-update2/conf/operator.9.9.9.pxc-operator.json +++ b/e2e-tests/smart-update2/conf/operator.9.9.9.pxc-operator.json @@ -5,6 +5,18 @@ "product": "pxc-operator", "matrix": { "pxc": { + "8.4.6-6.1": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.4", + "image_hash": "sha256:bee70295d85ade5044ff1adde70c5047fcc88109a9fbe109befe863372a60a1c", + "status": "available", + "critical": false + }, + "8.4.5-5.1": { + "image_path": "percona/percona-xtradb-cluster:8.4.5-5.1", + "image_hash": "918c54c11c96bf61bb3f32315ef6b344b7b1d68a0457a47a3804eca3932b2b17", + "status": "available", + "critical": false + }, "8.0.20-11.2": { "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.0", "image_hash": "feda5612db18da824e971891d6084465aa9cdc9918c18001cd95ba30916da78b", @@ -67,15 +79,15 @@ } }, "pmm": { - "2.0.0": { - "image_path": "perconalab/percona-xtradb-cluster-operator:main-pmm", - "image_hash": "28bbb6693689a15c407c85053755334cd25d864e632ef7fed890bc85726cfb68", + "2.44.1-1": { + "image_path": "percona/pmm-client:2.44.1-1", + "image_hash": "52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3", "status": "recommended", "critical": false }, - "1.17.1": { - 
"image_path": "percona/percona-xtradb-cluster-operator:1.6.0-pmm", - "image_hash": "28bbb6693689a15c407c85053755334cd25d864e632ef7fed890bc85726cfb68", + "3.4.1": { + "image_path": "percona/pmm-client:3.4.1", + "image_hash": "1c59d7188f8404e0294f4bfb3d2c3600107f808a023668a170a6b8036c56619b", "status": "recommended", "critical": false } @@ -97,6 +109,12 @@ } }, "backup": { + "8.4.0-4.1": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.4-backup", + "image_hash": "sha256:40a22aa9f83d08c4a79db4a947cdab2e316d7e03535ae8874c6e6ec7bfd11938", + "status": "available", + "critical": false + }, "8.0.14": { "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup", "image_hash": "3d57e1174bac5c1c10b253437205682445c1f72c9b2b462bc8375e211c0265b5", diff --git a/e2e-tests/smart-update2/conf/smart-update-haproxy.yml b/e2e-tests/smart-update2/conf/smart-update-haproxy.yml index 5d36197559..fe77639a17 100644 --- a/e2e-tests/smart-update2/conf/smart-update-haproxy.yml +++ b/e2e-tests/smart-update2/conf/smart-update-haproxy.yml @@ -63,7 +63,7 @@ spec: gracePeriod: 30 pmm: enabled: false - image: perconalab/pmm-client:1.17.1 + image: percona/pmm-client:2.44.1-1 serverHost: monitoring-service serverUser: pmm backup: diff --git a/e2e-tests/smart-update2/conf/smart-update-pmm3.yaml b/e2e-tests/smart-update2/conf/smart-update-pmm3.yaml new file mode 100644 index 0000000000..2c3342e773 --- /dev/null +++ b/e2e-tests/smart-update2/conf/smart-update-pmm3.yaml @@ -0,0 +1,80 @@ +apiVersion: pxc.percona.com/v1 +kind: PerconaXtraDBCluster +metadata: + name: smart-update + finalizers: + - percona.com/delete-pxc-pods-in-order +spec: + crVersion: 9.9.9 + updateStrategy: SmartUpdate + upgradeOptions: + versionServiceEndpoint: https://127.0.0.1/versions + apply: recommended + schedule: "0 4 * * *" + secretsName: my-cluster-secrets + pause: false + pxc: + size: 3 + image: -pxc + resources: + requests: + memory: 2Gi + cpu: "1" + limits: + memory: 2Gi + cpu: "1" + 
volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 6Gi + affinity: + antiAffinityTopologyKey: "kubernetes.io/hostname" + proxysql: + enabled: false + size: 2 + image: -proxysql + resources: + requests: + memory: 1Gi + cpu: "1" + limits: + memory: 1Gi + cpu: "1" + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 2Gi + affinity: + antiAffinityTopologyKey: "kubernetes.io/hostname" + haproxy: + enabled: true + size: 2 + image: -haproxy + resources: + requests: + memory: 1G + cpu: 600m + affinity: + antiAffinityTopologyKey: "kubernetes.io/hostname" + podDisruptionBudget: + maxUnavailable: 1 + gracePeriod: 30 + pmm: + enabled: true + image: percona/pmm-client:3.4.0 + serverHost: monitoring-service + serverUser: pmm + backup: + image: -backup + serviceAccountName: percona-xtradb-cluster-operator + storages: + pvc: + type: filesystem + volume: + persistentVolumeClaim: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 2Gi \ No newline at end of file diff --git a/e2e-tests/smart-update2/conf/smart-update-version-service-reachable.yml b/e2e-tests/smart-update2/conf/smart-update-version-service-reachable.yml index 5d36197559..fe77639a17 100644 --- a/e2e-tests/smart-update2/conf/smart-update-version-service-reachable.yml +++ b/e2e-tests/smart-update2/conf/smart-update-version-service-reachable.yml @@ -63,7 +63,7 @@ spec: gracePeriod: 30 pmm: enabled: false - image: perconalab/pmm-client:1.17.1 + image: percona/pmm-client:2.44.1-1 serverHost: monitoring-service serverUser: pmm backup: diff --git a/e2e-tests/smart-update2/conf/smart-update-version-service-unreachable.yml b/e2e-tests/smart-update2/conf/smart-update-version-service-unreachable.yml index f26dfca4c8..e94b8404b9 100644 --- a/e2e-tests/smart-update2/conf/smart-update-version-service-unreachable.yml +++ b/e2e-tests/smart-update2/conf/smart-update-version-service-unreachable.yml @@ -63,7 +63,7 @@ spec: gracePeriod: 30 pmm: enabled: false - image: 
perconalab/pmm-client:1.17.1 + image: percona/pmm-client:2.44.0 serverHost: monitoring-service serverUser: pmm backup: diff --git a/e2e-tests/smart-update2/conf/smart-update.yml b/e2e-tests/smart-update2/conf/smart-update.yml index fa58e6e869..fe8273630d 100644 --- a/e2e-tests/smart-update2/conf/smart-update.yml +++ b/e2e-tests/smart-update2/conf/smart-update.yml @@ -50,7 +50,7 @@ spec: antiAffinityTopologyKey: "kubernetes.io/hostname" pmm: enabled: false - image: perconalab/pmm-client:1.17.1 + image: percona/pmm-client:2.44.1-1 serverHost: monitoring-service serverUser: pmm backup: diff --git a/e2e-tests/smart-update2/conf/vs.yml b/e2e-tests/smart-update2/conf/vs.yml index f1513b3fac..21210cd158 100644 --- a/e2e-tests/smart-update2/conf/vs.yml +++ b/e2e-tests/smart-update2/conf/vs.yml @@ -18,7 +18,7 @@ spec: - env: - name: SERVE_HTTP value: "true" - image: perconalab/version-service:main-e378a19 + image: perconalab/version-service:main-latest imagePullPolicy: IfNotPresent name: version-service ports: diff --git a/e2e-tests/smart-update2/run b/e2e-tests/smart-update2/run index dc1a84f132..8469e83a4b 100755 --- a/e2e-tests/smart-update2/run +++ b/e2e-tests/smart-update2/run @@ -31,6 +31,13 @@ VS_URL="http://version-service" VS_PORT="11000" VS_ENDPOINT="${VS_URL}:${VS_PORT}" +# Determine update strategy based on PXC version +if [[ "${PXC_VER}" == "8.4" ]]; then + VS_UPDATE_STRATEGY="latest" +else + VS_UPDATE_STRATEGY="recommended" +fi + function get_pod_names_images { local cluster=${1} local type=${2:-pxc} @@ -156,10 +163,13 @@ function check_telemetry_transfer() { fi local image_prefix=${cr_vs_channel%'-recommended'} + image_prefix=${image_prefix%'-latest'} local telemetry_cr_log_file="${telemetry_state}_telemetry.version-service-cr-${image_prefix}${OPERATOR_NS:+-cw}.log.json" desc "telemetry was disabled in operator but not in CR" - if [ "${cr_vs_channel}" == "${image_prefix}-recommended" -a "${telemetry_state}" == 'disabled' ]; then + if [[ "${cr_vs_channel}" == 
"${image_prefix}-recommended" || "${cr_vs_channel}" == "${image_prefix}-latest" ]] && [ "${telemetry_state}" == 'disabled' ]; then desc "cr VS should have telemetry" + cat "${test_dir}/compare/${telemetry_cr_log_file}" + cat "${tmp_dir}/${telemetry_state}_telemetry.version-service-cr.log.json" diff "${test_dir}/compare/${telemetry_cr_log_file}" <(grep -f "${tmp_dir}/${telemetry_state}_telemetry.version-service-cr.log.json" "${test_dir}/compare/${telemetry_cr_log_file}") desc "operator VS should not have telemetry" [[ -s ${tmp_dir}/disabled_telemetry.version-service.log.json ]] && exit 1 @@ -217,7 +227,7 @@ function main() { sleep 30 wait_pod "$(get_operator_pod)" "480" "${OPERATOR_NS}" - check_telemetry_transfer "http://version-service-cr.${namespace}.svc.cluster.local:11000" "${IMAGE_PREFIX}-recommended" "disabled" + check_telemetry_transfer "http://version-service-cr.${namespace}.svc.cluster.local:11000" "${IMAGE_PREFIX}-${VS_UPDATE_STRATEGY}" "disabled" kubectl_bin delete pod -l run=version-service-cr kubectl_bin delete pod -l run=version-service check_telemetry_transfer "http://version-service-cr.${namespace}.svc.cluster.local:11000" "disabled" "disabled" @@ -237,8 +247,8 @@ function main() { fi ################################################## - desc 'PXC cluster update with recommended image by version service' - vs_image="recommended" + desc "PXC cluster update with ${VS_UPDATE_STRATEGY} image by version service" + vs_image="${VS_UPDATE_STRATEGY}" initial_primary=$(run_mysql 'SELECT @@hostname hostname;' "-h ${CLUSTER}-haproxy -uroot -proot_password") kubectl_bin patch pxc/"${CLUSTER}" --type=merge -p '{"spec":{"upgradeOptions":{"versionServiceEndpoint":"'${VS_ENDPOINT}'","apply":"'${vs_image}'","schedule": "* * * * *"}}}' @@ -289,6 +299,80 @@ function main() { kubectl_bin delete -f "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" kubectl_bin delete pvc --all + ################################################## + desc 'PMM2 cluster update with the 
recommended image by version service' + spinup_pxc "${CLUSTER}" "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" + + # Enable PMM2 with older version + kubectl_bin patch pxc/"${CLUSTER}" --type=merge -p '{"spec":{"pmm":{"enabled":true,"image":"percona/pmm-client:2.44.0","serverHost":"monitoring-service"}}}' + wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" + + # Get recommended PMM2 image from version service + pmm2_recommended_image=$(kubectl_bin exec -ti "$(get_operator_pod)" ${OPERATOR_NS:+-n $OPERATOR_NS} -- curl -s "${VS_URL}.${namespace}.svc.cluster.local:${VS_PORT}/versions/v1/pxc-operator/9.9.9" | jq -r '.versions[].matrix.pmm | to_entries[] | select(.value.imagePath) | select(.value.imagePath | contains("pmm-client:2")) | select(.value.status == "recommended") | .value.imagePath') + + desc "Updating PMM2 to recommended: ${pmm2_recommended_image}" + kubectl_bin patch pxc/"${CLUSTER}" --type=merge -p '{"spec":{"upgradeOptions":{"versionServiceEndpoint":"'${VS_ENDPOINT}'","apply":"recommended","schedule": "* * * * *"}}}' + sleep 55 + + desc "Waiting for PMM2 containers to update..." + wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" + + # Verify PMM2 updated + for i in $(seq 0 $((CLUSTER_SIZE - 1))); do + actual_pmm_image=$(kubectl_bin get pod "${CLUSTER}-pxc-${i}" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}') + if [[ "${actual_pmm_image}" != *"${pmm2_recommended_image}"* ]]; then + echo "ERROR: PMM2 image not updated on ${CLUSTER}-pxc-${i}. 
Expected: ${pmm2_recommended_image}, Got: ${actual_pmm_image}" + exit 1 + fi + done + desc "PMM2 successfully updated to ${pmm2_recommended_image}" + + kubectl_bin delete -f "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" + kubectl_bin delete pvc --all + + ################################################## + desc 'PMM3 cluster update with the recommended image by version service' + + desc "Updating secret for PMM3 (pmmserver -> pmmservertoken)" + # Get current pmmserver value from my-cluster-secrets + pmm_password=$(kubectl_bin get secret my-cluster-secrets -o jsonpath='{.data.pmmserver}') + + # Patch my-cluster-secrets: remove pmmserver, add pmmservertoken + kubectl_bin patch secret my-cluster-secrets --type=json \ + -p '[{"op":"remove","path":"/data/pmmserver"},{"op":"add","path":"/data/pmmservertoken","value":"'${pmm_password}'"}]' + + cp -f "${test_dir}/conf/${CLUSTER}-pmm3.yaml" "${tmp_dir}/${CLUSTER}-pmm3.yaml" + yq -i eval ".spec.initContainer.image = \"${IMAGE}\"" "${tmp_dir}/${CLUSTER}-pmm3.yaml" + yq -i eval ".spec.pxc.image = \"${IMAGE_PXC}\"" "${tmp_dir}/${CLUSTER}-pmm3.yaml" + yq -i eval ".spec.haproxy.image = \"${IMAGE_HAPROXY}\"" "${tmp_dir}/${CLUSTER}-pmm3.yaml" + yq -i eval ".spec.backup.image = \"${IMAGE_BACKUP}\"" "${tmp_dir}/${CLUSTER}-pmm3.yaml" + spinup_pxc "${CLUSTER}" "${tmp_dir}/${CLUSTER}-pmm3.yaml" + + wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" + + # Get recommended PMM3 image from version service + pmm3_recommended_image=$(kubectl_bin exec -ti "$(get_operator_pod)" ${OPERATOR_NS:+-n $OPERATOR_NS} -- curl -s "${VS_URL}.${namespace}.svc.cluster.local:${VS_PORT}/versions/v1/pxc-operator/9.9.9" | jq -r '.versions[].matrix.pmm | to_entries[] | select(.value.imagePath) | select(.value.imagePath | contains("pmm-client:3")) | select(.value.status == "recommended") | .value.imagePath') + + desc "Updating PMM3 to recommended: ${pmm3_recommended_image}" + kubectl_bin patch pxc/"${CLUSTER}" --type=merge -p 
'{"spec":{"upgradeOptions":{"versionServiceEndpoint":"'${VS_ENDPOINT}'","apply":"recommended","schedule": "* * * * *"}}}' + sleep 55 + + desc "Waiting for PMM3 containers to update..." + wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" + + # Verify PMM3 updated + for i in $(seq 0 $((CLUSTER_SIZE - 1))); do + actual_pmm_image=$(kubectl_bin get pod "${CLUSTER}-pxc-${i}" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}') + if [[ "${actual_pmm_image}" != *"${pmm3_recommended_image}"* ]]; then + echo "ERROR: PMM3 image not updated on ${CLUSTER}-pxc-${i}. Expected: ${pmm3_recommended_image}, Got: ${actual_pmm_image}" + exit 1 + fi + done + desc "PMM3 successfully updated to ${pmm3_recommended_image}" + + kubectl_bin delete -f "${tmp_dir}/${CLUSTER}-pmm3.yaml" + kubectl_bin delete pvc --all + desc 'cleanup' kubectl_bin delete -f "${test_dir}/conf/vs.yml" destroy "${namespace}" From ec003af54324d84ee91fbde191b770608b8a8d8d Mon Sep 17 00:00:00 2001 From: Natalia Marukovich Date: Thu, 4 Dec 2025 22:05:28 +0100 Subject: [PATCH 02/21] separate tests --- e2e-tests/smart-update2/compare/test1.yaml | 23 --- e2e-tests/smart-update2/compare/test2.yaml | 21 --- e2e-tests/smart-update2/run | 74 --------- e2e-tests/smart-update3/compare/select-1.sql | 1 + .../conf/operator.9.9.9.pxc-operator.dep.json | 56 +++++++ .../conf/operator.9.9.9.pxc-operator.json | 150 ++++++++++++++++++ .../conf/smart-update-pmm3.yaml | 0 ...art-update-version-service-unreachable.yml | 80 ++++++++++ e2e-tests/smart-update3/conf/vs.yml | 50 ++++++ e2e-tests/smart-update3/run | 133 ++++++++++++++++ 10 files changed, 470 insertions(+), 118 deletions(-) delete mode 100644 e2e-tests/smart-update2/compare/test1.yaml delete mode 100644 e2e-tests/smart-update2/compare/test2.yaml create mode 100644 e2e-tests/smart-update3/compare/select-1.sql create mode 100644 e2e-tests/smart-update3/conf/operator.9.9.9.pxc-operator.dep.json create mode 100644 
e2e-tests/smart-update3/conf/operator.9.9.9.pxc-operator.json rename e2e-tests/{smart-update2 => smart-update3}/conf/smart-update-pmm3.yaml (100%) create mode 100644 e2e-tests/smart-update3/conf/smart-update-version-service-unreachable.yml create mode 100644 e2e-tests/smart-update3/conf/vs.yml create mode 100755 e2e-tests/smart-update3/run diff --git a/e2e-tests/smart-update2/compare/test1.yaml b/e2e-tests/smart-update2/compare/test1.yaml deleted file mode 100644 index 798e28262a..0000000000 --- a/e2e-tests/smart-update2/compare/test1.yaml +++ /dev/null @@ -1,23 +0,0 @@ - cat /var/folders/ww/_6wsbxjd5ysbpmgksfyrp2640000gn/T/tmp.k1D2dKMLy9/disabled_telemetry.version-service-cr.log.json - { - "system": "grpc", - "span.kind": "server", - "grpc.service": "version.VersionService", - "grpc.method": "Apply", - "grcp.start_time": "2025-12-03T10:55:47Z", - "grpc.request.content": { - "msg": { - "product": "pxc-operator", - "operatorVersion": "9.9.9", - "apply": "8.4-latest", - "platform": "kubernetes" - } - } - } - { - "system": "grpc", - "span.kind": "server", - "grpc.service": "version.VersionService", - "grpc.method": "Apply", - "grpc.code": "OK" - } \ No newline at end of file diff --git a/e2e-tests/smart-update2/compare/test2.yaml b/e2e-tests/smart-update2/compare/test2.yaml deleted file mode 100644 index fb3365a556..0000000000 --- a/e2e-tests/smart-update2/compare/test2.yaml +++ /dev/null @@ -1,21 +0,0 @@ - + cat /Users/marukovich/github-projects/percona/percona-xtradb-cluster-operator/e2e-tests/smart-update2/compare/disabled_telemetry.version-service-cr-8.4.log.json - { - "system": "grpc", - "span.kind": "server", - "grpc.service": "version.VersionService", - "grpc.method": "Apply", - "grpc.request.content": { - "msg": { - "product": "pxc-operator", - "operatorVersion": "9.9.9", - "apply": "8.4-latest", - } - } - } - { - "system": "grpc", - "span.kind": "server", - "grpc.service": "version.VersionService", - "grpc.method": "Apply", - "grpc.code": "OK" - } \ No 
newline at end of file diff --git a/e2e-tests/smart-update2/run b/e2e-tests/smart-update2/run index 8469e83a4b..bc50b8a5b7 100755 --- a/e2e-tests/smart-update2/run +++ b/e2e-tests/smart-update2/run @@ -299,80 +299,6 @@ function main() { kubectl_bin delete -f "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" kubectl_bin delete pvc --all - ################################################## - desc 'PMM2 cluster update with the recommended image by version service' - spinup_pxc "${CLUSTER}" "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" - - # Enable PMM2 with older version - kubectl_bin patch pxc/"${CLUSTER}" --type=merge -p '{"spec":{"pmm":{"enabled":true,"image":"percona/pmm-client:2.44.0","serverHost":"monitoring-service"}}}' - wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" - - # Get recommended PMM2 image from version service - pmm2_recommended_image=$(kubectl_bin exec -ti "$(get_operator_pod)" ${OPERATOR_NS:+-n $OPERATOR_NS} -- curl -s "${VS_URL}.${namespace}.svc.cluster.local:${VS_PORT}/versions/v1/pxc-operator/9.9.9" | jq -r '.versions[].matrix.pmm | to_entries[] | select(.value.imagePath) | select(.value.imagePath | contains("pmm-client:2")) | select(.value.status == "recommended") | .value.imagePath') - - desc "Updating PMM2 to recommended: ${pmm2_recommended_image}" - kubectl_bin patch pxc/"${CLUSTER}" --type=merge -p '{"spec":{"upgradeOptions":{"versionServiceEndpoint":"'${VS_ENDPOINT}'","apply":"recommended","schedule": "* * * * *"}}}' - sleep 55 - - desc "Waiting for PMM2 containers to update..." - wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" - - # Verify PMM2 updated - for i in $(seq 0 $((CLUSTER_SIZE - 1))); do - actual_pmm_image=$(kubectl_bin get pod "${CLUSTER}-pxc-${i}" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}') - if [[ "${actual_pmm_image}" != *"${pmm2_recommended_image}"* ]]; then - echo "ERROR: PMM2 image not updated on ${CLUSTER}-pxc-${i}. 
Expected: ${pmm2_recommended_image}, Got: ${actual_pmm_image}" - exit 1 - fi - done - desc "PMM2 successfully updated to ${pmm2_recommended_image}" - - kubectl_bin delete -f "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" - kubectl_bin delete pvc --all - - ################################################## - desc 'PMM3 cluster update with the recommended image by version service' - - desc "Updating secret for PMM3 (pmmserver -> pmmservertoken)" - # Get current pmmserver value from my-cluster-secrets - pmm_password=$(kubectl_bin get secret my-cluster-secrets -o jsonpath='{.data.pmmserver}') - - # Patch my-cluster-secrets: remove pmmserver, add pmmservertoken - kubectl_bin patch secret my-cluster-secrets --type=json \ - -p '[{"op":"remove","path":"/data/pmmserver"},{"op":"add","path":"/data/pmmservertoken","value":"'${pmm_password}'"}]' - - cp -f "${test_dir}/conf/${CLUSTER}-pmm3.yaml" "${tmp_dir}/${CLUSTER}-pmm3.yaml" - yq -i eval ".spec.initContainer.image = \"${IMAGE}\"" "${tmp_dir}/${CLUSTER}-pmm3.yaml" - yq -i eval ".spec.pxc.image = \"${IMAGE_PXC}\"" "${tmp_dir}/${CLUSTER}-pmm3.yaml" - yq -i eval ".spec.haproxy.image = \"${IMAGE_HAPROXY}\"" "${tmp_dir}/${CLUSTER}-pmm3.yaml" - yq -i eval ".spec.backup.image = \"${IMAGE_BACKUP}\"" "${tmp_dir}/${CLUSTER}-pmm3.yaml" - spinup_pxc "${CLUSTER}" "${tmp_dir}/${CLUSTER}-pmm3.yaml" - - wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" - - # Get recommended PMM3 image from version service - pmm3_recommended_image=$(kubectl_bin exec -ti "$(get_operator_pod)" ${OPERATOR_NS:+-n $OPERATOR_NS} -- curl -s "${VS_URL}.${namespace}.svc.cluster.local:${VS_PORT}/versions/v1/pxc-operator/9.9.9" | jq -r '.versions[].matrix.pmm | to_entries[] | select(.value.imagePath) | select(.value.imagePath | contains("pmm-client:3")) | select(.value.status == "recommended") | .value.imagePath') - - desc "Updating PMM3 to recommended: ${pmm3_recommended_image}" - kubectl_bin patch pxc/"${CLUSTER}" --type=merge -p 
'{"spec":{"upgradeOptions":{"versionServiceEndpoint":"'${VS_ENDPOINT}'","apply":"recommended","schedule": "* * * * *"}}}' - sleep 55 - - desc "Waiting for PMM3 containers to update..." - wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" - - # Verify PMM3 updated - for i in $(seq 0 $((CLUSTER_SIZE - 1))); do - actual_pmm_image=$(kubectl_bin get pod "${CLUSTER}-pxc-${i}" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}') - if [[ "${actual_pmm_image}" != *"${pmm3_recommended_image}"* ]]; then - echo "ERROR: PMM3 image not updated on ${CLUSTER}-pxc-${i}. Expected: ${pmm3_recommended_image}, Got: ${actual_pmm_image}" - exit 1 - fi - done - desc "PMM3 successfully updated to ${pmm3_recommended_image}" - - kubectl_bin delete -f "${tmp_dir}/${CLUSTER}-pmm3.yaml" - kubectl_bin delete pvc --all - desc 'cleanup' kubectl_bin delete -f "${test_dir}/conf/vs.yml" destroy "${namespace}" diff --git a/e2e-tests/smart-update3/compare/select-1.sql b/e2e-tests/smart-update3/compare/select-1.sql new file mode 100644 index 0000000000..8e738f4cf2 --- /dev/null +++ b/e2e-tests/smart-update3/compare/select-1.sql @@ -0,0 +1 @@ +100500 diff --git a/e2e-tests/smart-update3/conf/operator.9.9.9.pxc-operator.dep.json b/e2e-tests/smart-update3/conf/operator.9.9.9.pxc-operator.dep.json new file mode 100644 index 0000000000..93d471424f --- /dev/null +++ b/e2e-tests/smart-update3/conf/operator.9.9.9.pxc-operator.dep.json @@ -0,0 +1,56 @@ +{ + "backup": { + "8.4.0": { + "and": [ + { + ">=": [ + { + "var": "productVersion" + }, + "8.4" + ] + } + ] + }, + "8.0.14": { + "and": [ + { + ">=": [ + { + "var": "productVersion" + }, + "8.0" + ] + }, + { + "<": [ + { + "var": "productVersion" + }, + "8.4" + ] + } + ] + }, + "2.4.20": { + "and": [ + { + ">=": [ + { + "var": "productVersion" + }, + "5.7" + ] + }, + { + "<": [ + { + "var": "productVersion" + }, + "8.0" + ] + } + ] + } + } +} diff --git a/e2e-tests/smart-update3/conf/operator.9.9.9.pxc-operator.json 
b/e2e-tests/smart-update3/conf/operator.9.9.9.pxc-operator.json new file mode 100644 index 0000000000..f93797cf0b --- /dev/null +++ b/e2e-tests/smart-update3/conf/operator.9.9.9.pxc-operator.json @@ -0,0 +1,150 @@ +{ + "versions": [ + { + "operator": "9.9.9", + "product": "pxc-operator", + "matrix": { + "pxc": { + "8.4.6-6.1": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.4", + "image_hash": "sha256:bee70295d85ade5044ff1adde70c5047fcc88109a9fbe109befe863372a60a1c", + "status": "available", + "critical": false + }, + "8.4.5-5.1": { + "image_path": "percona/percona-xtradb-cluster:8.4.5-5.1", + "image_hash": "918c54c11c96bf61bb3f32315ef6b344b7b1d68a0457a47a3804eca3932b2b17", + "status": "available", + "critical": false + }, + "8.0.20-11.2": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.0", + "image_hash": "feda5612db18da824e971891d6084465aa9cdc9918c18001cd95ba30916da78b", + "status": "recommended", + "critical": false + }, + "8.0.20-11.1": { + "image_path": "percona/percona-xtradb-cluster:8.0.20-11.1", + "image_hash": "54b1b2f5153b78b05d651034d4603a13e685cbb9b45bfa09a39864fa3f169349", + "status": "available", + "critical": false + }, + "8.0.19-10.1": { + "image_path": "percona/percona-xtradb-cluster:8.0.19-10.1", + "image_hash": "1058ae8eded735ebdf664807aad7187942fc9a1170b3fd0369574cb61206b63a", + "status": "available", + "critical": false + }, + "5.7.31-31.45.2": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc5.7", + "image_hash": "0decf85c7c7afacc438f5fe355dc8320ea7ffc7018ca2cb6bda3ac0c526ae172", + "status": "recommended", + "critical": false + }, + "5.7.31-31.45": { + "image_path": "percona/percona-xtradb-cluster:5.7.31-31.45", + "image_hash": "3852cef43cc0c6aa791463ba6279e59dcdac3a4fb1a5616c745c1b3c68041dc2", + "status": "available", + "critical": false + }, + "5.7.30-31.43": { + "image_path": "percona/percona-xtradb-cluster:5.7.30-31.43", + "image_hash": 
"b03a060e9261b37288a2153c78f86dcfc53367c36e1bcdcae046dd2d0b0721af", + "status": "available", + "critical": false + }, + "5.7.29-31.43": { + "image_path": "percona/percona-xtradb-cluster:5.7.29-31.43", + "image_hash": "85fb479de073770280ae601cf3ec22dc5c8cca4c8b0dc893b09503767338e6f9", + "status": "available", + "critical": false + }, + "5.7.28-31.41.2": { + "image_path": "percona/percona-xtradb-cluster:5.7.28-31.41.2", + "image_hash": "fccd6525aaeedb5e436e9534e2a63aebcf743c043526dd05dba8519ebddc8b30", + "status": "available", + "critical": true + }, + "5.7.27-31.39": { + "image_path": "percona/percona-xtradb-cluster:5.7.27-31.39", + "image_hash": "7d8eb4d2031c32c6e96451655f359d8e5e8e047dc95bada9a28c41c158876c26", + "status": "available", + "critical": false + }, + "5.7.26-31.37": { + "image_path": "percona/percona-xtradb-cluster:5.7.26-31.37", + "image_hash": "9d43d8e435e4aca5c694f726cc736667cb938158635c5f01a0e9412905f1327f", + "status": "available", + "critical": false + } + }, + "pmm": { + "2.44.1-1": { + "image_path": "percona/pmm-client:2.44.1-1", + "image_hash": "52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3", + "status": "recommended", + "critical": false + }, + "3.4.1": { + "image_path": "percona/pmm-client:3.4.1", + "image_hash": "1c59d7188f8404e0294f4bfb3d2c3600107f808a023668a170a6b8036c56619b", + "status": "recommended", + "critical": false + } + }, + "proxysql": { + "2.0.14": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-proxysql", + "image_hash": "5c0ee8cb56f3a9cd01b907c2edddc8265b9d84d58a48bae31f8ee460d40ad3d6", + "status": "recommended", + "critical": false + } + }, + "haproxy": { + "2.1.7": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-haproxy", + "image_hash": "59bcc3ae1e3aadb410a89ed266102045437753a82e79501caa74d40c529a9955", + "status": "recommended", + "critical": false + } + }, + "backup": { + "8.4.0-4.1": { + "image_path": 
"perconalab/percona-xtradb-cluster-operator:main-pxc8.4-backup", + "image_hash": "sha256:40a22aa9f83d08c4a79db4a947cdab2e316d7e03535ae8874c6e6ec7bfd11938", + "status": "available", + "critical": false + }, + "8.0.14": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup", + "image_hash": "3d57e1174bac5c1c10b253437205682445c1f72c9b2b462bc8375e211c0265b5", + "status": "recommended", + "critical": false + }, + "2.4.20": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup", + "image_hash": "6b7e5f284e99553ab6a0c1dc3d8104b3e908d2bac8a71d52d2ea068c3df7d252", + "status": "recommended", + "critical": false + } + }, + "log_collector": { + "1.16.1": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-logcollector", + "image_hash": "122a103902d27890dceaf1855f175ea706a126aac940feb1089520029937f4a9", + "status": "recommended", + "critical": false + } + }, + "operator": { + "9.9.9": { + "image_path": "percona/percona-xtradb-cluster-operator:main", + "image_hash": "9871d6fb960b4ec498430a398a44eca08873591a6b6efb8a35349e79e24f3072", + "status": "recommended", + "critical": false + } + } + } + } + ] +} diff --git a/e2e-tests/smart-update2/conf/smart-update-pmm3.yaml b/e2e-tests/smart-update3/conf/smart-update-pmm3.yaml similarity index 100% rename from e2e-tests/smart-update2/conf/smart-update-pmm3.yaml rename to e2e-tests/smart-update3/conf/smart-update-pmm3.yaml diff --git a/e2e-tests/smart-update3/conf/smart-update-version-service-unreachable.yml b/e2e-tests/smart-update3/conf/smart-update-version-service-unreachable.yml new file mode 100644 index 0000000000..e94b8404b9 --- /dev/null +++ b/e2e-tests/smart-update3/conf/smart-update-version-service-unreachable.yml @@ -0,0 +1,80 @@ +apiVersion: pxc.percona.com/v1 +kind: PerconaXtraDBCluster +metadata: + name: smart-update + finalizers: + - percona.com/delete-pxc-pods-in-order +spec: + crVersion: 9.9.9 + updateStrategy: SmartUpdate + upgradeOptions: + 
versionServiceEndpoint: https://127.0.0.1/versions + apply: recommended + schedule: "0 4 * * *" + secretsName: my-cluster-secrets + pause: false + pxc: + size: 3 + image: -pxc + resources: + requests: + memory: 2Gi + cpu: "1" + limits: + memory: 2Gi + cpu: "1" + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 6Gi + affinity: + antiAffinityTopologyKey: "kubernetes.io/hostname" + proxysql: + enabled: false + size: 2 + image: -proxysql + resources: + requests: + memory: 1Gi + cpu: "1" + limits: + memory: 1Gi + cpu: "1" + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 2Gi + affinity: + antiAffinityTopologyKey: "kubernetes.io/hostname" + haproxy: + enabled: true + size: 2 + image: -haproxy + resources: + requests: + memory: 1G + cpu: 600m + affinity: + antiAffinityTopologyKey: "kubernetes.io/hostname" + podDisruptionBudget: + maxUnavailable: 1 + gracePeriod: 30 + pmm: + enabled: false + image: percona/pmm-client:2.44.0 + serverHost: monitoring-service + serverUser: pmm + backup: + image: -backup + serviceAccountName: percona-xtradb-cluster-operator + storages: + pvc: + type: filesystem + volume: + persistentVolumeClaim: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 2Gi diff --git a/e2e-tests/smart-update3/conf/vs.yml b/e2e-tests/smart-update3/conf/vs.yml new file mode 100644 index 0000000000..21210cd158 --- /dev/null +++ b/e2e-tests/smart-update3/conf/vs.yml @@ -0,0 +1,50 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + run: version-service + name: version-service +spec: + replicas: 1 + selector: + matchLabels: + run: version-service + template: + metadata: + labels: + run: version-service + spec: + containers: + - env: + - name: SERVE_HTTP + value: "true" + image: perconalab/version-service:main-latest + imagePullPolicy: IfNotPresent + name: version-service + ports: + - containerPort: 11000 + protocol: TCP + volumeMounts: + - name: versions + mountPath: 
/sources/operator.9.9.9.pxc-operator.dep.json + subPath: operator.9.9.9.pxc-operator.dep.json + - name: versions + mountPath: /sources/operator.9.9.9.pxc-operator.json + subPath: operator.9.9.9.pxc-operator.json + volumes: + - name: versions + configMap: + name: versions +--- +apiVersion: v1 +kind: Service +metadata: + name: version-service +spec: + ports: + - port: 11000 + protocol: TCP + targetPort: 11000 + selector: + run: version-service + type: ClusterIP diff --git a/e2e-tests/smart-update3/run b/e2e-tests/smart-update3/run new file mode 100755 index 0000000000..04699307f5 --- /dev/null +++ b/e2e-tests/smart-update3/run @@ -0,0 +1,133 @@ +#!/bin/bash +# CASES: +# - Update PMM2 to recommended version via version service +# - Update PMM3 to recommended version via version service + +set -o errexit + +test_dir=$(realpath $(dirname $0)) +. ${test_dir}/../functions + +set_debug + +API='pxc.percona.com/v9-9-9' +TARGET_IMAGE_PXC=${IMAGE_PXC} +CLUSTER="smart-update" +CLUSTER_SIZE=3 +PROXY_SIZE=2 + +if [[ ${TARGET_IMAGE_PXC} == *"percona-xtradb-cluster-operator"* ]]; then + PXC_VER=$(echo -n "${TARGET_IMAGE_PXC}" | $sed -r 's/.*([0-9].[0-9])$/\1/') +else + PXC_VER=$(echo -n "${TARGET_IMAGE_PXC}" | $sed -r 's/.*:([0-9]+\.[0-9]+).*$/\1/') +fi +VS_URL="http://version-service" +VS_PORT="11000" +VS_ENDPOINT="${VS_URL}:${VS_PORT}" + +function deploy_version_service { + desc 'install version service' + kubectl_bin create configmap versions \ + --from-file "${test_dir}/conf/operator.9.9.9.pxc-operator.dep.json" \ + --from-file "${test_dir}/conf/operator.9.9.9.pxc-operator.json" + kubectl_bin apply -f "${test_dir}/conf/vs.yml" + sleep 10 +} + +function main() { + create_infra "${namespace}" + deploy_version_service + deploy_cert_manager + + kubectl_bin patch crd perconaxtradbclusters.pxc.percona.com --type='json' -p '[{"op":"add","path":"/spec/versions/-", "value":{"name": "v9-9-9","schema": {"openAPIV3Schema": {"properties": {"spec": {"type": 
"object","x-kubernetes-preserve-unknown-fields": true},"status": {"type": "object", "x-kubernetes-preserve-unknown-fields": true}}, "type": "object" }}, "served": true, "storage": false, "subresources": { "status": {}}}}]' + kubectl_bin ${OPERATOR_NS:+-n $OPERATOR_NS} set env deploy/percona-xtradb-cluster-operator "PERCONA_VS_FALLBACK_URI=http://version-service.${namespace}.svc.cluster.local:11000" + + ################################################## + desc 'PMM2 cluster update with the recommended image by version service' + + # Prepare cluster config + cp -f "${test_dir}/conf/${CLUSTER}-version-service-unreachable.yml" "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" + yq -i eval ".spec.initContainer.image = \"${IMAGE}\"" "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" + yq -i eval ".spec.pxc.image = \"${IMAGE_PXC}\"" "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" + yq -i eval ".spec.haproxy.image = \"${IMAGE_HAPROXY}\"" "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" + yq -i eval ".spec.backup.image = \"${IMAGE_BACKUP}\"" "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" + spinup_pxc "${CLUSTER}" "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" + + # Enable PMM2 with older version + kubectl_bin patch pxc/"${CLUSTER}" --type=merge -p '{"spec":{"pmm":{"enabled":true,"image":"percona/pmm-client:2.44.0","serverHost":"monitoring-service"}}}' + wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" + + # Get recommended PMM2 image from version service + pmm2_recommended_image=$(kubectl_bin exec -ti "$(get_operator_pod)" ${OPERATOR_NS:+-n $OPERATOR_NS} -- curl -s "${VS_URL}.${namespace}.svc.cluster.local:${VS_PORT}/versions/v1/pxc-operator/9.9.9" | jq -r '.versions[].matrix.pmm | to_entries[] | select(.value.imagePath) | select(.value.imagePath | contains("pmm-client:2")) | select(.value.status == "recommended") | .value.imagePath') + + desc "Updating PMM2 to recommended: ${pmm2_recommended_image}" + 
kubectl_bin patch pxc/"${CLUSTER}" --type=merge -p '{"spec":{"upgradeOptions":{"versionServiceEndpoint":"'${VS_ENDPOINT}'","apply":"recommended","schedule": "* * * * *"}}}' + sleep 55 + + desc "Waiting for PMM2 containers to update..." + wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" + + # Verify PMM2 updated + for i in $(seq 0 $((CLUSTER_SIZE - 1))); do + actual_pmm_image=$(kubectl_bin get pod "${CLUSTER}-pxc-${i}" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}') + if [[ "${actual_pmm_image}" != *"${pmm2_recommended_image}"* ]]; then + echo "ERROR: PMM2 image not updated on ${CLUSTER}-pxc-${i}. Expected: ${pmm2_recommended_image}, Got: ${actual_pmm_image}" + exit 1 + fi + done + desc "PMM2 successfully updated to ${pmm2_recommended_image}" + + kubectl_bin delete -f "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" + kubectl_bin delete pvc --all + + ################################################## + desc 'PMM3 cluster update with the recommended image by version service' + + desc "Updating secret for PMM3 (pmmserver -> pmmservertoken)" + # Get current pmmserver value from my-cluster-secrets + pmm_password=$(kubectl_bin get secret my-cluster-secrets -o jsonpath='{.data.pmmserver}') + + # Patch my-cluster-secrets: remove pmmserver, add pmmservertoken + kubectl_bin patch secret my-cluster-secrets --type=json \ + -p '[{"op":"remove","path":"/data/pmmserver"},{"op":"add","path":"/data/pmmservertoken","value":"'${pmm_password}'"}]' + + # Prepare PMM3 cluster config + cp -f "${test_dir}/conf/${CLUSTER}-pmm3.yaml" "${tmp_dir}/${CLUSTER}-pmm3.yaml" + yq -i eval ".spec.initContainer.image = \"${IMAGE}\"" "${tmp_dir}/${CLUSTER}-pmm3.yaml" + yq -i eval ".spec.pxc.image = \"${IMAGE_PXC}\"" "${tmp_dir}/${CLUSTER}-pmm3.yaml" + yq -i eval ".spec.haproxy.image = \"${IMAGE_HAPROXY}\"" "${tmp_dir}/${CLUSTER}-pmm3.yaml" + yq -i eval ".spec.backup.image = \"${IMAGE_BACKUP}\"" "${tmp_dir}/${CLUSTER}-pmm3.yaml" + spinup_pxc 
"${CLUSTER}" "${tmp_dir}/${CLUSTER}-pmm3.yaml" + + wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" + + # Get recommended PMM3 image from version service + pmm3_recommended_image=$(kubectl_bin exec -ti "$(get_operator_pod)" ${OPERATOR_NS:+-n $OPERATOR_NS} -- curl -s "${VS_URL}.${namespace}.svc.cluster.local:${VS_PORT}/versions/v1/pxc-operator/9.9.9" | jq -r '.versions[].matrix.pmm | to_entries[] | select(.value.imagePath) | select(.value.imagePath | contains("pmm-client:3")) | select(.value.status == "recommended") | .value.imagePath') + + desc "Updating PMM3 to recommended: ${pmm3_recommended_image}" + kubectl_bin patch pxc/"${CLUSTER}" --type=merge -p '{"spec":{"upgradeOptions":{"versionServiceEndpoint":"'${VS_ENDPOINT}'","apply":"recommended","schedule": "* * * * *"}}}' + sleep 55 + + desc "Waiting for PMM3 containers to update..." + wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" + + # Verify PMM3 updated + for i in $(seq 0 $((CLUSTER_SIZE - 1))); do + actual_pmm_image=$(kubectl_bin get pod "${CLUSTER}-pxc-${i}" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}') + if [[ "${actual_pmm_image}" != *"${pmm3_recommended_image}"* ]]; then + echo "ERROR: PMM3 image not updated on ${CLUSTER}-pxc-${i}. 
Expected: ${pmm3_recommended_image}, Got: ${actual_pmm_image}" + exit 1 + fi + done + desc "PMM3 successfully updated to ${pmm3_recommended_image}" + + kubectl_bin delete -f "${tmp_dir}/${CLUSTER}-pmm3.yaml" + kubectl_bin delete pvc --all + + desc 'cleanup' + kubectl_bin delete -f "${test_dir}/conf/vs.yml" + destroy "${namespace}" + desc "test passed" +} + +main \ No newline at end of file From 088b939d40bcd1cb3847bff6cdc4d90bba45bf20 Mon Sep 17 00:00:00 2001 From: Natalia Marukovich Date: Thu, 4 Dec 2025 22:09:18 +0100 Subject: [PATCH 03/21] add to test --- e2e-tests/run-pr.csv | 1 + e2e-tests/run-release.csv | 1 + 2 files changed, 2 insertions(+) diff --git a/e2e-tests/run-pr.csv b/e2e-tests/run-pr.csv index ad2febde2a..3f1cc407aa 100644 --- a/e2e-tests/run-pr.csv +++ b/e2e-tests/run-pr.csv @@ -51,6 +51,7 @@ smart-update1,8.0 smart-update2,8.0 smart-update1,8.4 smart-update2,8.4 +smart-update3,8.0 storage,8.0 tls-issue-cert-manager-ref,8.0 tls-issue-cert-manager,8.0 diff --git a/e2e-tests/run-release.csv b/e2e-tests/run-release.csv index 14b265aa1a..4197fb9a96 100644 --- a/e2e-tests/run-release.csv +++ b/e2e-tests/run-release.csv @@ -35,6 +35,7 @@ self-healing-advanced-chaos self-healing-chaos smart-update1 smart-update2 +smart-update3 storage tls-issue-cert-manager tls-issue-cert-manager-ref From 1ec70251d06891880270a4115217163f29c7d66a Mon Sep 17 00:00:00 2001 From: Natalia Marukovich Date: Fri, 5 Dec 2025 08:31:06 +0100 Subject: [PATCH 04/21] delete pitr --- e2e-tests/run-pr.csv | 2 -- 1 file changed, 2 deletions(-) diff --git a/e2e-tests/run-pr.csv b/e2e-tests/run-pr.csv index 3f1cc407aa..9a5a209d7e 100644 --- a/e2e-tests/run-pr.csv +++ b/e2e-tests/run-pr.csv @@ -29,9 +29,7 @@ monitoring-pmm3,8.4 one-pod,5.7 one-pod,8.0 pitr,8.0 -pitr,8.4 pitr-gap-errors,8.0 -pitr-gap-errors,8.4 proxy-protocol,8.0 proxy-switch,8.0 proxysql-sidecar-res-limits,8.0 From 340abfc4c4ee76ddf35d750b845e79f394ef17a6 Mon Sep 17 00:00:00 2001 From: Natalia Marukovich Date: Mon, 8 Dec 
2025 15:05:52 +0100 Subject: [PATCH 05/21] fix the test --- ...update-pmm3.yaml => smart-update-pmm3.yml} | 0 e2e-tests/smart-update3/run | 109 +++++++++++++++--- 2 files changed, 95 insertions(+), 14 deletions(-) rename e2e-tests/smart-update3/conf/{smart-update-pmm3.yaml => smart-update-pmm3.yml} (100%) diff --git a/e2e-tests/smart-update3/conf/smart-update-pmm3.yaml b/e2e-tests/smart-update3/conf/smart-update-pmm3.yml similarity index 100% rename from e2e-tests/smart-update3/conf/smart-update-pmm3.yaml rename to e2e-tests/smart-update3/conf/smart-update-pmm3.yml diff --git a/e2e-tests/smart-update3/run b/e2e-tests/smart-update3/run index 04699307f5..e53bca9603 100755 --- a/e2e-tests/smart-update3/run +++ b/e2e-tests/smart-update3/run @@ -34,6 +34,65 @@ function deploy_version_service { sleep 10 } +function add_pxc_version_to_vs { + local pxc_version=${1} + local pxc_image=${2} + + desc "Adding PXC version ${pxc_version} to version service" + kubectl_bin get configmap versions -o json | \ + jq --arg ver "${pxc_version}" --arg img "${pxc_image}" \ + '.data["operator.9.9.9.pxc-operator.json"] |= (fromjson | .versions[0].matrix.pxc += {($ver): {"imagePath": $img, "imageHash": "abc123", "status": "available", "critical": false}} | tojson)' | \ + kubectl_bin apply -f - + + # Restart version service to reload config + kubectl_bin delete pod -l run=version-service + sleep 10 +} + +function wait_pmm_update { + local cluster=${1} + local cluster_size=${2} + local expected_image=${3} + local pmm_version=${4} # e.g., "2" or "3" + local max_retry=${5:-120} # Default 10 minutes + + desc "Waiting for PMM${pmm_version} containers to update to ${expected_image}..." 
+ set +x + local retry=0 + echo -n "Waiting for PMM update" + + until [[ $retry -ge $max_retry ]]; do + local updated_count=0 + for i in $(seq 0 $((cluster_size - 1))); do + local actual_pmm_image=$(kubectl_bin get pod "${cluster}-pxc-${i}" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}' 2>/dev/null || echo "") + # Only check if actual image contains expected image (handles docker.io prefix) + if [[ "${actual_pmm_image}" == *"${expected_image}"* ]]; then + ((updated_count += 1)) + fi + done + + if [[ ${updated_count} -eq ${cluster_size} ]]; then + echo " Done! All ${cluster_size} pods updated." + set -x + return 0 + fi + + echo -n "." + ((retry += 1)) + sleep 5 + done + + # Timeout reached + set -x + echo "ERROR: Timeout waiting for PMM${pmm_version} update after $((max_retry * 5)) seconds" + echo "Expected image: ${expected_image}" + for i in $(seq 0 $((cluster_size - 1))); do + local actual=$(kubectl_bin get pod "${cluster}-pxc-${i}" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}' 2>/dev/null || echo "none") + echo " ${cluster}-pxc-${i}: ${actual}" + done + return 1 +} + function main() { create_infra "${namespace}" deploy_version_service @@ -53,18 +112,30 @@ function main() { yq -i eval ".spec.backup.image = \"${IMAGE_BACKUP}\"" "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" spinup_pxc "${CLUSTER}" "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" + # Get actual PXC version from running cluster and add to version service + wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" + ACTUAL_PXC_VERSION=$(kubectl_bin get pxc "${CLUSTER}" -o jsonpath='{.status.pxc.version}') + desc "Detected PXC version: ${ACTUAL_PXC_VERSION}" + add_pxc_version_to_vs "${ACTUAL_PXC_VERSION}" "${IMAGE_PXC}" + # Enable PMM2 with older version kubectl_bin patch pxc/"${CLUSTER}" --type=merge -p '{"spec":{"pmm":{"enabled":true,"image":"percona/pmm-client:2.44.0","serverHost":"monitoring-service"}}}' 
wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" + # Get initial PMM2 image + initial_pmm2_image=$(kubectl_bin get pod "${CLUSTER}-pxc-0" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}') + desc "Initial PMM2 image: ${initial_pmm2_image}" + # Get recommended PMM2 image from version service pmm2_recommended_image=$(kubectl_bin exec -ti "$(get_operator_pod)" ${OPERATOR_NS:+-n $OPERATOR_NS} -- curl -s "${VS_URL}.${namespace}.svc.cluster.local:${VS_PORT}/versions/v1/pxc-operator/9.9.9" | jq -r '.versions[].matrix.pmm | to_entries[] | select(.value.imagePath) | select(.value.imagePath | contains("pmm-client:2")) | select(.value.status == "recommended") | .value.imagePath') + desc "Target PMM2 recommended image: ${pmm2_recommended_image}" - desc "Updating PMM2 to recommended: ${pmm2_recommended_image}" + # Update cluster to use version service with short schedule kubectl_bin patch pxc/"${CLUSTER}" --type=merge -p '{"spec":{"upgradeOptions":{"versionServiceEndpoint":"'${VS_ENDPOINT}'","apply":"recommended","schedule": "* * * * *"}}}' - sleep 55 - desc "Waiting for PMM2 containers to update..." 
+ # Wait for PMM2 update + wait_pmm_update "${CLUSTER}" "${CLUSTER_SIZE}" "${pmm2_recommended_image}" "2" || exit 1 + wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" # Verify PMM2 updated @@ -92,23 +163,33 @@ function main() { -p '[{"op":"remove","path":"/data/pmmserver"},{"op":"add","path":"/data/pmmservertoken","value":"'${pmm_password}'"}]' # Prepare PMM3 cluster config - cp -f "${test_dir}/conf/${CLUSTER}-pmm3.yaml" "${tmp_dir}/${CLUSTER}-pmm3.yaml" - yq -i eval ".spec.initContainer.image = \"${IMAGE}\"" "${tmp_dir}/${CLUSTER}-pmm3.yaml" - yq -i eval ".spec.pxc.image = \"${IMAGE_PXC}\"" "${tmp_dir}/${CLUSTER}-pmm3.yaml" - yq -i eval ".spec.haproxy.image = \"${IMAGE_HAPROXY}\"" "${tmp_dir}/${CLUSTER}-pmm3.yaml" - yq -i eval ".spec.backup.image = \"${IMAGE_BACKUP}\"" "${tmp_dir}/${CLUSTER}-pmm3.yaml" - spinup_pxc "${CLUSTER}" "${tmp_dir}/${CLUSTER}-pmm3.yaml" - + cp -f "${test_dir}/conf/${CLUSTER}-pmm3.yml" "${tmp_dir}/${CLUSTER}-pmm3.yml" + yq -i eval ".spec.initContainer.image = \"${IMAGE}\"" "${tmp_dir}/${CLUSTER}-pmm3.yml" + yq -i eval ".spec.pxc.image = \"${IMAGE_PXC}\"" "${tmp_dir}/${CLUSTER}-pmm3.yml" + yq -i eval ".spec.haproxy.image = \"${IMAGE_HAPROXY}\"" "${tmp_dir}/${CLUSTER}-pmm3.yml" + yq -i eval ".spec.backup.image = \"${IMAGE_BACKUP}\"" "${tmp_dir}/${CLUSTER}-pmm3.yml" + spinup_pxc "${CLUSTER}" "${tmp_dir}/${CLUSTER}-pmm3.yml" + + # Get actual PXC version from running cluster and add to version service wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" + ACTUAL_PXC_VERSION=$(kubectl_bin get pxc "${CLUSTER}" -o jsonpath='{.status.pxc.version}') + desc "Detected PXC version for PMM3: ${ACTUAL_PXC_VERSION}" + add_pxc_version_to_vs "${ACTUAL_PXC_VERSION}" "${IMAGE_PXC}" + + # Get initial PMM3 image + initial_pmm3_image=$(kubectl_bin get pod "${CLUSTER}-pxc-0" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}') + desc "Initial PMM3 image: ${initial_pmm3_image}" # Get recommended PMM3 
image from version service pmm3_recommended_image=$(kubectl_bin exec -ti "$(get_operator_pod)" ${OPERATOR_NS:+-n $OPERATOR_NS} -- curl -s "${VS_URL}.${namespace}.svc.cluster.local:${VS_PORT}/versions/v1/pxc-operator/9.9.9" | jq -r '.versions[].matrix.pmm | to_entries[] | select(.value.imagePath) | select(.value.imagePath | contains("pmm-client:3")) | select(.value.status == "recommended") | .value.imagePath') + desc "Target PMM3 recommended image: ${pmm3_recommended_image}" - desc "Updating PMM3 to recommended: ${pmm3_recommended_image}" + # Update cluster to use version service kubectl_bin patch pxc/"${CLUSTER}" --type=merge -p '{"spec":{"upgradeOptions":{"versionServiceEndpoint":"'${VS_ENDPOINT}'","apply":"recommended","schedule": "* * * * *"}}}' - sleep 55 - desc "Waiting for PMM3 containers to update..." + # Wait for PMM3 update + wait_pmm_update "${CLUSTER}" "${CLUSTER_SIZE}" "${pmm3_recommended_image}" "3" || exit 1 + wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" # Verify PMM3 updated @@ -121,7 +202,7 @@ function main() { done desc "PMM3 successfully updated to ${pmm3_recommended_image}" - kubectl_bin delete -f "${tmp_dir}/${CLUSTER}-pmm3.yaml" + kubectl_bin delete -f "${tmp_dir}/${CLUSTER}-pmm3.yml" kubectl_bin delete pvc --all desc 'cleanup' From 9f3228bb804103b43969cb841adda4eb5839515e Mon Sep 17 00:00:00 2001 From: Natalia Marukovich Date: Tue, 9 Dec 2025 14:52:03 +0100 Subject: [PATCH 06/21] fix --- e2e-tests/smart-update3/run | 3 --- 1 file changed, 3 deletions(-) diff --git a/e2e-tests/smart-update3/run b/e2e-tests/smart-update3/run index e53bca9603..830b19af76 100755 --- a/e2e-tests/smart-update3/run +++ b/e2e-tests/smart-update3/run @@ -107,9 +107,6 @@ function main() { # Prepare cluster config cp -f "${test_dir}/conf/${CLUSTER}-version-service-unreachable.yml" "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" yq -i eval ".spec.initContainer.image = \"${IMAGE}\"" "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" 
- yq -i eval ".spec.pxc.image = \"${IMAGE_PXC}\"" "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" - yq -i eval ".spec.haproxy.image = \"${IMAGE_HAPROXY}\"" "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" - yq -i eval ".spec.backup.image = \"${IMAGE_BACKUP}\"" "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" spinup_pxc "${CLUSTER}" "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" # Get actual PXC version from running cluster and add to version service From 823f59094418efbe9e4d1e8d7f88ddc4036791b9 Mon Sep 17 00:00:00 2001 From: Natalia Marukovich Date: Wed, 10 Dec 2025 10:55:19 +0100 Subject: [PATCH 07/21] for debug --- e2e-tests/run-pr.csv | 65 ------------------------------------- e2e-tests/smart-update3/run | 9 +++-- 2 files changed, 6 insertions(+), 68 deletions(-) diff --git a/e2e-tests/run-pr.csv b/e2e-tests/run-pr.csv index 55af12d591..34d79ec5e5 100644 --- a/e2e-tests/run-pr.csv +++ b/e2e-tests/run-pr.csv @@ -1,66 +1 @@ -auto-tuning,8.0 -allocator,8.0 -allocator,8.4 -backup-storage-tls,8.0 -backup-storage-tls,8.4 -cross-site,8.0 -custom-users,8.0 -demand-backup-cloud,8.0 -demand-backup-cloud,8.4 -demand-backup-encrypted-with-tls,8.0 -demand-backup-encrypted-with-tls,8.4 -demand-backup,8.0 -demand-backup,8.4 -demand-backup-flow-control,8.0 -demand-backup-flow-control,8.4 -demand-backup-parallel,8.0 -demand-backup-parallel,8.4 -demand-backup-without-passwords,8.0 -demand-backup-without-passwords,8.4 -extra-pvc,8.0 -haproxy,5.7 -haproxy,8.0 -haproxy,8.4 -init-deploy,5.7 -init-deploy,8.0 -limits,8.0 -monitoring-2-0,8.0 -monitoring-pmm3,8.0 -monitoring-pmm3,8.4 -one-pod,5.7 -one-pod,8.0 -pitr,8.0 -pitr-gap-errors,8.0 -proxy-protocol,8.0 -proxy-switch,8.0 -proxysql-sidecar-res-limits,8.0 -proxysql-scheduler,8.0 -pvc-resize,5.7 -pvc-resize,8.0 -recreate,8.0 -restore-to-encrypted-cluster,8.0 -restore-to-encrypted-cluster,8.4 -scaling-proxysql,8.0 -scaling,8.0 -scheduled-backup,5.7 -scheduled-backup,8.0 -scheduled-backup,8.4 
-security-context,8.0 -smart-update1,8.0 -smart-update2,8.0 -smart-update1,8.4 -smart-update2,8.4 smart-update3,8.0 -storage,8.0 -tls-issue-cert-manager-ref,8.0 -tls-issue-cert-manager,8.0 -tls-issue-self,8.0 -upgrade-consistency,8.0 -upgrade-consistency,8.4 -upgrade-haproxy,5.7 -upgrade-haproxy,8.0 -upgrade-proxysql,5.7 -upgrade-proxysql,8.0 -users,5.7 -users,8.0 -validation-hook,8.0 \ No newline at end of file diff --git a/e2e-tests/smart-update3/run b/e2e-tests/smart-update3/run index 830b19af76..5c66dee05f 100755 --- a/e2e-tests/smart-update3/run +++ b/e2e-tests/smart-update3/run @@ -106,9 +106,15 @@ function main() { # Prepare cluster config cp -f "${test_dir}/conf/${CLUSTER}-version-service-unreachable.yml" "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" + cat "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" yq -i eval ".spec.initContainer.image = \"${IMAGE}\"" "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" + + desc "DEBUG" + cat "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" + desc "DEBUG" spinup_pxc "${CLUSTER}" "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" + # Get actual PXC version from running cluster and add to version service wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" ACTUAL_PXC_VERSION=$(kubectl_bin get pxc "${CLUSTER}" -o jsonpath='{.status.pxc.version}') @@ -162,9 +168,6 @@ function main() { # Prepare PMM3 cluster config cp -f "${test_dir}/conf/${CLUSTER}-pmm3.yml" "${tmp_dir}/${CLUSTER}-pmm3.yml" yq -i eval ".spec.initContainer.image = \"${IMAGE}\"" "${tmp_dir}/${CLUSTER}-pmm3.yml" - yq -i eval ".spec.pxc.image = \"${IMAGE_PXC}\"" "${tmp_dir}/${CLUSTER}-pmm3.yml" - yq -i eval ".spec.haproxy.image = \"${IMAGE_HAPROXY}\"" "${tmp_dir}/${CLUSTER}-pmm3.yml" - yq -i eval ".spec.backup.image = \"${IMAGE_BACKUP}\"" "${tmp_dir}/${CLUSTER}-pmm3.yml" spinup_pxc "${CLUSTER}" "${tmp_dir}/${CLUSTER}-pmm3.yml" # Get actual PXC version from running cluster and add to version service From 
1b79dba69e6bbda9b128d6eacc950eb402955f18 Mon Sep 17 00:00:00 2001 From: Natalia Marukovich Date: Wed, 10 Dec 2025 12:34:08 +0100 Subject: [PATCH 08/21] debug --- e2e-tests/functions | 42 ++++++++++++++++++++++++++++++++++-------- 1 file changed, 34 insertions(+), 8 deletions(-) diff --git a/e2e-tests/functions b/e2e-tests/functions index e866970ca0..7b75539db7 100755 --- a/e2e-tests/functions +++ b/e2e-tests/functions @@ -954,18 +954,44 @@ apply_secrets() { fi } +#apply_config() { +# local config_file="$1" +# local pvc_name="${2:-}" +# +# if [ -z "$SKIP_REMOTE_BACKUPS" ]; then +# cat_config "$config_file" "$pvc_name" \ +# | kubectl_bin apply -f - +# else +# cat_config "$config_file" "$pvc_name"\ +# | yq eval 'del(.spec.backup.schedule.[1])' - \ +# | kubectl_bin apply -f - +# fi +#} + apply_config() { local config_file="$1" local pvc_name="${2:-}" - if [ -z "$SKIP_REMOTE_BACKUPS" ]; then - cat_config "$config_file" "$pvc_name" \ - | kubectl_bin apply -f - - else - cat_config "$config_file" "$pvc_name"\ - | yq eval 'del(.spec.backup.schedule.[1])' - \ - | kubectl_bin apply -f - - fi + if [ -z "$SKIP_REMOTE_BACKUPS" ]; then + local final_yaml + final_yaml="$(cat_config "$config_file" "$pvc_name")" + + echo "====== FINAL YAML APPLIED ======" + echo "$final_yaml" + echo "================================" + + echo "$final_yaml" | kubectl_bin apply -f - + else + local final_yaml + final_yaml="$(cat_config "$config_file" "$pvc_name" \ + | yq eval 'del(.spec.backup.schedule.[1])' -)" + + echo "====== FINAL YAML APPLIED ======" + echo "$final_yaml" + echo "========================================================" + + echo "$final_yaml" | kubectl_bin apply -f - + fi } get_proxy() { From 0ef056d7ef8507e763f67362c56d1e2b90de11ef Mon Sep 17 00:00:00 2001 From: Natalia Marukovich Date: Wed, 10 Dec 2025 13:44:53 +0100 Subject: [PATCH 09/21] debug --- e2e-tests/functions | 42 ++++--------------- ...-unreachable.yml => smart-update-pmm2.yml} | 0 e2e-tests/smart-update3/run | 12 
++---- 3 files changed, 12 insertions(+), 42 deletions(-) rename e2e-tests/smart-update3/conf/{smart-update-version-service-unreachable.yml => smart-update-pmm2.yml} (100%) diff --git a/e2e-tests/functions b/e2e-tests/functions index 7b75539db7..e866970ca0 100755 --- a/e2e-tests/functions +++ b/e2e-tests/functions @@ -954,44 +954,18 @@ apply_secrets() { fi } -#apply_config() { -# local config_file="$1" -# local pvc_name="${2:-}" -# -# if [ -z "$SKIP_REMOTE_BACKUPS" ]; then -# cat_config "$config_file" "$pvc_name" \ -# | kubectl_bin apply -f - -# else -# cat_config "$config_file" "$pvc_name"\ -# | yq eval 'del(.spec.backup.schedule.[1])' - \ -# | kubectl_bin apply -f - -# fi -#} - apply_config() { local config_file="$1" local pvc_name="${2:-}" - if [ -z "$SKIP_REMOTE_BACKUPS" ]; then - local final_yaml - final_yaml="$(cat_config "$config_file" "$pvc_name")" - - echo "====== FINAL YAML APPLIED ======" - echo "$final_yaml" - echo "================================" - - echo "$final_yaml" | kubectl_bin apply -f - - else - local final_yaml - final_yaml="$(cat_config "$config_file" "$pvc_name" \ - | yq eval 'del(.spec.backup.schedule.[1])' -)" - - echo "====== FINAL YAML APPLIED ======" - echo "$final_yaml" - echo "========================================================" - - echo "$final_yaml" | kubectl_bin apply -f - - fi + if [ -z "$SKIP_REMOTE_BACKUPS" ]; then + cat_config "$config_file" "$pvc_name" \ + | kubectl_bin apply -f - + else + cat_config "$config_file" "$pvc_name"\ + | yq eval 'del(.spec.backup.schedule.[1])' - \ + | kubectl_bin apply -f - + fi } get_proxy() { diff --git a/e2e-tests/smart-update3/conf/smart-update-version-service-unreachable.yml b/e2e-tests/smart-update3/conf/smart-update-pmm2.yml similarity index 100% rename from e2e-tests/smart-update3/conf/smart-update-version-service-unreachable.yml rename to e2e-tests/smart-update3/conf/smart-update-pmm2.yml diff --git a/e2e-tests/smart-update3/run b/e2e-tests/smart-update3/run index 
5c66dee05f..ca1d2d1ea6 100755 --- a/e2e-tests/smart-update3/run +++ b/e2e-tests/smart-update3/run @@ -105,14 +105,10 @@ function main() { desc 'PMM2 cluster update with the recommended image by version service' # Prepare cluster config - cp -f "${test_dir}/conf/${CLUSTER}-version-service-unreachable.yml" "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" - cat "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" - yq -i eval ".spec.initContainer.image = \"${IMAGE}\"" "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" + cp -f "${test_dir}/conf/${CLUSTER}-version-service-pmm2.yml" "${tmp_dir}/${CLUSTER}-version-service-pmm2.yml" + yq -i eval ".spec.initContainer.image = \"${IMAGE}\"" "${tmp_dir}/${CLUSTER}-version-service-pmm2.yml" - desc "DEBUG" - cat "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" - desc "DEBUG" - spinup_pxc "${CLUSTER}" "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" + spinup_pxc "${CLUSTER}" "${tmp_dir}/${CLUSTER}-version-service-pmm2.yml" # Get actual PXC version from running cluster and add to version service @@ -151,7 +147,7 @@ function main() { done desc "PMM2 successfully updated to ${pmm2_recommended_image}" - kubectl_bin delete -f "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" + kubectl_bin delete -f "${tmp_dir}/${CLUSTER}-version-service-pmm2.yml" kubectl_bin delete pvc --all ################################################## From d5328cbce27c248756628048764c12aa61304dc2 Mon Sep 17 00:00:00 2001 From: Natalia Marukovich Date: Wed, 10 Dec 2025 14:59:56 +0100 Subject: [PATCH 10/21] debug --- e2e-tests/smart-update3/run | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/e2e-tests/smart-update3/run b/e2e-tests/smart-update3/run index ca1d2d1ea6..9ce29d9d50 100755 --- a/e2e-tests/smart-update3/run +++ b/e2e-tests/smart-update3/run @@ -105,10 +105,10 @@ function main() { desc 'PMM2 cluster update with the recommended image by version service' # Prepare cluster config - cp 
-f "${test_dir}/conf/${CLUSTER}-version-service-pmm2.yml" "${tmp_dir}/${CLUSTER}-version-service-pmm2.yml" - yq -i eval ".spec.initContainer.image = \"${IMAGE}\"" "${tmp_dir}/${CLUSTER}-version-service-pmm2.yml" + cp -f "${test_dir}/conf/${CLUSTER}-pmm2.yml" "${tmp_dir}/${CLUSTER}-pmm2.yml" + yq -i eval ".spec.initContainer.image = \"${IMAGE}\"" "${tmp_dir}/${CLUSTER}-pmm2.yml" - spinup_pxc "${CLUSTER}" "${tmp_dir}/${CLUSTER}-version-service-pmm2.yml" + spinup_pxc "${CLUSTER}" "${tmp_dir}/${CLUSTER}-pmm2.yml" # Get actual PXC version from running cluster and add to version service @@ -129,6 +129,12 @@ function main() { pmm2_recommended_image=$(kubectl_bin exec -ti "$(get_operator_pod)" ${OPERATOR_NS:+-n $OPERATOR_NS} -- curl -s "${VS_URL}.${namespace}.svc.cluster.local:${VS_PORT}/versions/v1/pxc-operator/9.9.9" | jq -r '.versions[].matrix.pmm | to_entries[] | select(.value.imagePath) | select(.value.imagePath | contains("pmm-client:2")) | select(.value.status == "recommended") | .value.imagePath') desc "Target PMM2 recommended image: ${pmm2_recommended_image}" + # Compare images and exit if they are the same + if [ "${initial_pmm2_image}" == "${pmm2_recommended_image}" ]; then + desc "PMM2 images are the same (${initial_pmm2_image}), skipping upgrade test" + exit 1 + fi + # Update cluster to use version service with short schedule kubectl_bin patch pxc/"${CLUSTER}" --type=merge -p '{"spec":{"upgradeOptions":{"versionServiceEndpoint":"'${VS_ENDPOINT}'","apply":"recommended","schedule": "* * * * *"}}}' @@ -147,7 +153,7 @@ function main() { done desc "PMM2 successfully updated to ${pmm2_recommended_image}" - kubectl_bin delete -f "${tmp_dir}/${CLUSTER}-version-service-pmm2.yml" + kubectl_bin delete -f "${tmp_dir}/${CLUSTER}-pmm2.yml" kubectl_bin delete pvc --all ################################################## @@ -180,6 +186,11 @@ function main() { pmm3_recommended_image=$(kubectl_bin exec -ti "$(get_operator_pod)" ${OPERATOR_NS:+-n $OPERATOR_NS} -- curl -s 
"${VS_URL}.${namespace}.svc.cluster.local:${VS_PORT}/versions/v1/pxc-operator/9.9.9" | jq -r '.versions[].matrix.pmm | to_entries[] | select(.value.imagePath) | select(.value.imagePath | contains("pmm-client:3")) | select(.value.status == "recommended") | .value.imagePath') desc "Target PMM3 recommended image: ${pmm3_recommended_image}" + if [ "${initial_pmm3_image}" == "${pmm3_recommended_image}" ]; then + desc "PMM3 images are the same (${initial_pmm3_image}), skipping upgrade test" + exit 1 + fi + # Update cluster to use version service kubectl_bin patch pxc/"${CLUSTER}" --type=merge -p '{"spec":{"upgradeOptions":{"versionServiceEndpoint":"'${VS_ENDPOINT}'","apply":"recommended","schedule": "* * * * *"}}}' From 48647b671e2f2bb440e8d265d0410c8bbf1a1afa Mon Sep 17 00:00:00 2001 From: Natalia Marukovich Date: Thu, 11 Dec 2025 22:38:58 +0100 Subject: [PATCH 11/21] fix smart-update --- e2e-tests/functions | 133 ++++++++++++++++- e2e-tests/monitoring-pmm3/run | 137 +----------------- e2e-tests/run-pr.csv | 68 +++++++++ .../conf/operator.9.9.9.pxc-operator.json | 4 +- e2e-tests/smart-update3/conf/secrets.yml | 33 +++++ .../smart-update3/conf/smart-update-pmm2.yml | 80 ---------- e2e-tests/smart-update3/run | 114 +++++---------- 7 files changed, 273 insertions(+), 296 deletions(-) create mode 100644 e2e-tests/smart-update3/conf/secrets.yml delete mode 100644 e2e-tests/smart-update3/conf/smart-update-pmm2.yml diff --git a/e2e-tests/functions b/e2e-tests/functions index e866970ca0..380da0f775 100755 --- a/e2e-tests/functions +++ b/e2e-tests/functions @@ -23,8 +23,8 @@ export IMAGE_BACKUP=${IMAGE_BACKUP:-"perconalab/percona-xtradb-cluster-operator: export IMAGE_LOGCOLLECTOR=${IMAGE_LOGCOLLECTOR:-"perconalab/fluentbit:main-logcollector"} export IMAGE_PMM_CLIENT=${IMAGE_PMM_CLIENT:-"perconalab/pmm-client:dev-latest"} export IMAGE_PMM_SERVER=${IMAGE_PMM_SERVER:-"perconalab/pmm-server:dev-latest"} -export IMAGE_PMM3_CLIENT=${IMAGE_PMM3_CLIENT:-"perconalab/pmm-client:3.1.0"} 
-export IMAGE_PMM3_SERVER=${IMAGE_PMM3_SERVER:-"perconalab/pmm-server:3.1.0"} +export IMAGE_PMM3_CLIENT=${IMAGE_PMM3_CLIENT:-"perconalab/pmm-client:3.4.0"} +export IMAGE_PMM3_SERVER=${IMAGE_PMM3_SERVER:-"perconalab/pmm-server:3.4.0"} if oc get projects 2>/dev/null; then OPENSHIFT=$(oc version -o json | jq -r '.openshiftVersion' | grep -oE '^[0-9]+\.[0-9]+') @@ -885,6 +885,39 @@ get_metric_values() { } +get_metric_values_pmm3() { + local metric=$1 + local instance=$2 + local token=$3 + local start=$($date -u "+%s" -d "-1 minute") + local end=$($date -u "+%s") + local endpoint=$(get_service_endpoint monitoring-service) + + if [ -z "$metric" ]; then + echo "Error: metric is required" + exit 1 + fi + + if [ -z "$token" ]; then + echo "Error: token is required" + exit 1 + fi + + local wait_count=30 + local retry=0 + until [[ $(curl -s -k -H "Authorization: Bearer ${token}" "https://$endpoint/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28$metric%7Bnode_name%3D%7E%22$instance%22%7d%20or%20$metric%7Bnode_name%3D%7E%22$instance%22%7D%29&start=$start&end=$end&step=60" \ + | jq '.data.result[0].values[][1]' \ + | grep '^"[0-9]') ]]; do + sleep 2 + local start=$($date -u "+%s" -d "-1 minute") + local end=$($date -u "+%s") + let retry+=1 + if [[ $retry -ge $wait_count ]]; then + exit 1 + fi + done +} + get_qan20_values() { local instance=$1 local user_pass=$2 @@ -2073,6 +2106,39 @@ deploy_pmm_server() { wait_for_pmm_service } +deploy_pmm3_server() { + helm uninstall -n "${NAMESPACE}" monitoring || : + helm repo remove percona || : + kubectl delete clusterrole monitoring --ignore-not-found + kubectl delete clusterrolebinding monitoring --ignore-not-found + helm repo add percona https://percona.github.io/percona-helm-charts/ + helm repo update + + if [ ! 
-z "$OPENSHIFT" ]; then + oc create sa pmm-server + oc adm policy add-scc-to-user privileged -z pmm-server + if [[ $OPERATOR_NS ]]; then + timeout 30 oc delete clusterrolebinding $(kubectl get clusterrolebinding | grep 'pmm-pxc-operator-' | awk '{print $1}') || : + oc create clusterrolebinding pmm-pxc-operator-cluster-wide --clusterrole=percona-xtradb-cluster-operator --serviceaccount=$namespace:pmm-server + oc patch clusterrole/percona-xtradb-cluster-operator --type json -p='[{"op":"add","path": "/rules/-","value":{"apiGroups":["security.openshift.io"],"resources":["securitycontextconstraints"],"verbs":["use"],"resourceNames":["privileged"]}}]' -n $OPERATOR_NS + else + oc create rolebinding pmm-pxc-operator-namespace-only --role percona-xtradb-cluster-operator --serviceaccount=$namespace:pmm-server + oc patch role/percona-xtradb-cluster-operator --type json -p='[{"op":"add","path": "/rules/-","value":{"apiGroups":["security.openshift.io"],"resources":["securitycontextconstraints"],"verbs":["use"],"resourceNames":["privileged"]}}]' + fi + local additional_params="--set platform=openshift --set supresshttp2=false --set serviceAccount.create=false --set serviceAccount.name=pmm-server" + fi + + retry 10 60 helm install monitoring percona/pmm -n "${NAMESPACE}" \ + --set fullnameOverride=monitoring \ + --set image.tag=${IMAGE_PMM3_SERVER#*:} \ + --set image.repository=${IMAGE_PMM3_SERVER%:*} \ + --set service.type=LoadBalancer \ + $additional_params \ + --force + + wait_for_pmm_service +} + wait_for_pmm_service() { timeout=420 start=$(date +%s) @@ -2088,6 +2154,69 @@ wait_for_pmm_service() { kubectl_bin wait sts/monitoring --for=jsonpath='{.status.readyReplicas}'=1 --timeout=${timeout}s } +get_pmm_server_token() { + local key_name=$1 + + if [[ -z $key_name ]]; then + key_name="operator" + fi + + local ADMIN_PASSWORD + ADMIN_PASSWORD=$(kubectl get secret pmm-secret -o jsonpath="{.data.PMM_ADMIN_PASSWORD}" | base64 --decode) + + if [[ -z $ADMIN_PASSWORD ]]; then + echo 
"Error: ADMIN_PASSWORD is empty or not found!" >&2 + return 1 + fi + + local create_response create_status_code create_json_response + local retry=0 + until [[ $create_status_code == 201 ]]; do + create_response=$(curl --insecure -s -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' \ + -d "{\"name\":\"${key_name}\", \"role\":\"Admin\", \"isDisabled\":false}" \ + --user "admin:${ADMIN_PASSWORD}" \ + "https://$(get_service_ip monitoring-service)/graph/api/serviceaccounts" \ + -w "\n%{http_code}") + + create_status_code=$(echo "$create_response" | tail -n1) + create_json_response=$(echo "$create_response" | sed '$ d') + + sleep 5 + let retry+=1 + if [ "$retry" -ge 24 ]; then + echo "Error: Failed to create PMM service account. HTTP Status: $create_status_code" >&2 + echo "Response: $create_json_response" >&2 + return 1 + fi + done + + local service_account_id + service_account_id=$(echo "$create_json_response" | jq -r '.id') + + if [[ -z $service_account_id || $service_account_id == "null" ]]; then + echo "Error: Failed to extract service account ID!" >&2 + return 1 + fi + + local token_response token_status_code token_json_response + token_response=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \ + -d "{\"name\":\"${key_name}\"}" \ + --user "admin:${ADMIN_PASSWORD}" \ + "https://$(get_service_ip monitoring-service)/graph/api/serviceaccounts/${service_account_id}/tokens" \ + -w "\n%{http_code}") + + token_status_code=$(echo "$token_response" | tail -n1) + token_json_response=$(echo "$token_response" | sed '$ d') + + if [[ $token_status_code -ne 200 ]]; then + echo "Error: Failed to create token. 
HTTP Status: $token_status_code" >&2 + echo "Response: $token_json_response" >&2 + return 1 + fi + + echo "$token_json_response" | jq -r '.key' +} + run_recovery_check_pitr() { local cluster=$1 local restore=$2 diff --git a/e2e-tests/monitoring-pmm3/run b/e2e-tests/monitoring-pmm3/run index 6a43d8bda7..e2e12b75d1 100755 --- a/e2e-tests/monitoring-pmm3/run +++ b/e2e-tests/monitoring-pmm3/run @@ -7,39 +7,6 @@ test_dir=$(realpath $(dirname $0)) set_debug -deploy_pmm3_server() { - helm uninstall -n "${NAMESPACE}" monitoring || : - helm repo remove percona || : - kubectl delete clusterrole monitoring --ignore-not-found - kubectl delete clusterrolebinding monitoring --ignore-not-found - helm repo add percona https://percona.github.io/percona-helm-charts/ - helm repo update - - if [ ! -z "$OPENSHIFT" ]; then - oc create sa pmm-server - oc adm policy add-scc-to-user privileged -z pmm-server - if [[ $OPERATOR_NS ]]; then - timeout 30 oc delete clusterrolebinding $(kubectl get clusterrolebinding | grep 'pmm-pxc-operator-' | awk '{print $1}') || : - oc create clusterrolebinding pmm-pxc-operator-cluster-wide --clusterrole=percona-xtradb-cluster-operator --serviceaccount=$namespace:pmm-server - oc patch clusterrole/percona-xtradb-cluster-operator --type json -p='[{"op":"add","path": "/rules/-","value":{"apiGroups":["security.openshift.io"],"resources":["securitycontextconstraints"],"verbs":["use"],"resourceNames":["privileged"]}}]' -n $OPERATOR_NS - else - oc create rolebinding pmm-pxc-operator-namespace-only --role percona-xtradb-cluster-operator --serviceaccount=$namespace:pmm-server - oc patch role/percona-xtradb-cluster-operator --type json -p='[{"op":"add","path": "/rules/-","value":{"apiGroups":["security.openshift.io"],"resources":["securitycontextconstraints"],"verbs":["use"],"resourceNames":["privileged"]}}]' - fi - local additional_params="--set platform=openshift --set supresshttp2=false --set serviceAccount.create=false --set serviceAccount.name=pmm-server" - fi - - 
retry 10 60 helm install monitoring percona/pmm -n "${NAMESPACE}" \ - --set fullnameOverride=monitoring \ - --set image.tag=${IMAGE_PMM3_SERVER#*:} \ - --set image.repository=${IMAGE_PMM3_SERVER%:*} \ - --set service.type=LoadBalancer \ - $additional_params \ - --force - - wait_for_pmm_service -} - spinup_pxc() { local cluster=$1 local config=$2 @@ -89,69 +56,6 @@ spinup_pxc() { fi } -get_pmm_server_token() { - local key_name=$1 - - if [[ -z $key_name ]]; then - key_name="operator" - fi - - local ADMIN_PASSWORD - ADMIN_PASSWORD=$(kubectl get secret pmm-secret -o jsonpath="{.data.PMM_ADMIN_PASSWORD}" | base64 --decode) - - if [[ -z $ADMIN_PASSWORD ]]; then - echo "Error: ADMIN_PASSWORD is empty or not found!" >&2 - return 1 - fi - - local create_response create_status_code create_json_response - local retry=0 - until [[ $create_status_code == 201 ]]; do - create_response=$(curl --insecure -s -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' \ - -d "{\"name\":\"${key_name}\", \"role\":\"Admin\", \"isDisabled\":false}" \ - --user "admin:${ADMIN_PASSWORD}" \ - "https://$(get_service_ip monitoring-service)/graph/api/serviceaccounts" \ - -w "\n%{http_code}") - - create_status_code=$(echo "$create_response" | tail -n1) - create_json_response=$(echo "$create_response" | sed '$ d') - - sleep 5 - let retry+=1 - if [ "$retry" -ge 24 ]; then - echo "Error: Failed to create PMM service account. HTTP Status: $create_status_code" >&2 - echo "Response: $create_json_response" >&2 - return 1 - fi - done - - local service_account_id - service_account_id=$(echo "$create_json_response" | jq -r '.id') - - if [[ -z $service_account_id || $service_account_id == "null" ]]; then - echo "Error: Failed to extract service account ID!" 
>&2 - return 1 - fi - - local token_response token_status_code token_json_response - token_response=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \ - -d "{\"name\":\"${key_name}\"}" \ - --user "admin:${ADMIN_PASSWORD}" \ - "https://$(get_service_ip monitoring-service)/graph/api/serviceaccounts/${service_account_id}/tokens" \ - -w "\n%{http_code}") - - token_status_code=$(echo "$token_response" | tail -n1) - token_json_response=$(echo "$token_response" | sed '$ d') - - if [[ $token_status_code -ne 200 ]]; then - echo "Error: Failed to create token. HTTP Status: $token_status_code" >&2 - echo "Response: $token_json_response" >&2 - return 1 - fi - - echo "$token_json_response" | jq -r '.key' -} - verify_custom_cluster_name() { local expected_cluster=$1 local token=$2 @@ -263,39 +167,6 @@ delete_pmm_server_token() { fi } -get_metric_values() { - local metric=$1 - local instance=$2 - local token=$3 - local start=$($date -u "+%s" -d "-1 minute") - local end=$($date -u "+%s") - local endpoint=$(get_service_endpoint monitoring-service) - - if [ -z "$metric" ]; then - echo "Error: metric is required" - exit 1 - fi - - if [ -z "$token" ]; then - echo "Error: token is required" - exit 1 - fi - - local wait_count=30 - local retry=0 - until [[ $(curl -s -k -H "Authorization: Bearer ${token}" "https://$endpoint/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28$metric%7Bnode_name%3D%7E%22$instance%22%7d%20or%20$metric%7Bnode_name%3D%7E%22$instance%22%7D%29&start=$start&end=$end&step=60" \ - | jq '.data.result[0].values[][1]' \ - | grep '^"[0-9]') ]]; do - sleep 2 - local start=$($date -u "+%s" -d "-1 minute") - local end=$($date -u "+%s") - let retry+=1 - if [[ $retry -ge $wait_count ]]; then - exit 1 - fi - done -} - get_qan20_values() { local instance=$1 local token=$2 @@ -404,12 +275,12 @@ compare_kubectl statefulset/$cluster-haproxy desc 'check mysql metrics' sleep 60 -get_metric_values node_boot_time_seconds pxc-prefix-$namespace-$cluster-pxc-0 
$NEW_TOKEN -get_metric_values mysql_global_status_uptime pxc-prefix-$namespace-$cluster-pxc-0 $NEW_TOKEN +get_metric_values_pmm3 node_boot_time_seconds pxc-prefix-$namespace-$cluster-pxc-0 $NEW_TOKEN +get_metric_values_pmm3 mysql_global_status_uptime pxc-prefix-$namespace-$cluster-pxc-0 $NEW_TOKEN desc 'check haproxy metrics' -get_metric_values haproxy_backend_status pxc-prefix-$namespace-$cluster-haproxy-0 $NEW_TOKEN -get_metric_values haproxy_backend_active_servers pxc-prefix-$namespace-$cluster-haproxy-0 $NEW_TOKEN +get_metric_values_pmm3 haproxy_backend_status pxc-prefix-$namespace-$cluster-haproxy-0 $NEW_TOKEN +get_metric_values_pmm3 haproxy_backend_active_servers pxc-prefix-$namespace-$cluster-haproxy-0 $NEW_TOKEN desc 'check QAN data' get_qan20_values $cluster-pxc-0 $NEW_TOKEN diff --git a/e2e-tests/run-pr.csv b/e2e-tests/run-pr.csv index 34d79ec5e5..79b5a35c40 100644 --- a/e2e-tests/run-pr.csv +++ b/e2e-tests/run-pr.csv @@ -1 +1,69 @@ +auto-tuning,8.0 +allocator,8.0 +allocator,8.4 +backup-storage-tls,8.0 +cross-site,8.0 +custom-users,8.0 +demand-backup-cloud,8.0 +demand-backup-cloud,8.4 +demand-backup-cloud-pxb,8.0 +demand-backup-encrypted-with-tls,8.0 +demand-backup-encrypted-with-tls,8.4 +demand-backup-encrypted-with-tls-pxb,8.0 +demand-backup-encrypted-with-tls-pxb,8.4 +demand-backup,8.0 +demand-backup-flow-control,8.0 +demand-backup-flow-control,8.4 +demand-backup-parallel,8.0 +demand-backup-parallel,8.4 +demand-backup-without-passwords,8.0 +demand-backup-without-passwords,8.4 +extra-pvc,8.0 +haproxy,5.7 +haproxy,8.0 +haproxy,8.4 +init-deploy,5.7 +init-deploy,8.0 +limits,8.0 +monitoring-2-0,8.0 +monitoring-pmm3,8.0 +monitoring-pmm3,8.4 +one-pod,5.7 +one-pod,8.0 +pitr,8.0 +pitr-pxb,8.0 +pitr-gap-errors,8.0 +proxy-protocol,8.0 +proxy-switch,8.0 +proxysql-sidecar-res-limits,8.0 +proxysql-scheduler,8.0 +pvc-resize,5.7 +pvc-resize,8.0 +recreate,8.0 +restore-to-encrypted-cluster,8.0 +restore-to-encrypted-cluster,8.4 +restore-to-encrypted-cluster-pxb,8.0 
+scaling-proxysql,8.0 +scaling,8.0 +scheduled-backup,5.7 +scheduled-backup,8.0 +scheduled-backup,8.4 +security-context,8.0 +smart-update1,8.0 +smart-update1,8.4 +smart-update2,8.0 +smart-update2,8.4 smart-update3,8.0 +storage,8.0 +tls-issue-cert-manager-ref,8.0 +tls-issue-cert-manager,8.0 +tls-issue-self,8.0 +upgrade-consistency,8.0 +upgrade-consistency,8.4 +upgrade-haproxy,5.7 +upgrade-haproxy,8.0 +upgrade-proxysql,5.7 +upgrade-proxysql,8.0 +users,5.7 +users,8.0 +validation-hook,8.0 \ No newline at end of file diff --git a/e2e-tests/smart-update3/conf/operator.9.9.9.pxc-operator.json b/e2e-tests/smart-update3/conf/operator.9.9.9.pxc-operator.json index f93797cf0b..0701f8f6af 100644 --- a/e2e-tests/smart-update3/conf/operator.9.9.9.pxc-operator.json +++ b/e2e-tests/smart-update3/conf/operator.9.9.9.pxc-operator.json @@ -7,7 +7,7 @@ "pxc": { "8.4.6-6.1": { "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.4", - "image_hash": "sha256:bee70295d85ade5044ff1adde70c5047fcc88109a9fbe109befe863372a60a1c", + "image_hash": "bee70295d85ade5044ff1adde70c5047fcc88109a9fbe109befe863372a60a1c", "status": "available", "critical": false }, @@ -87,7 +87,7 @@ }, "3.4.1": { "image_path": "percona/pmm-client:3.4.1", - "image_hash": "1c59d7188f8404e0294f4bfb3d2c3600107f808a023668a170a6b8036c56619b", + "image_hash": "sha256:1c59d7188f8404e0294f4bfb3d2c3600107f808a023668a170a6b8036c56619b", "status": "recommended", "critical": false } diff --git a/e2e-tests/smart-update3/conf/secrets.yml b/e2e-tests/smart-update3/conf/secrets.yml new file mode 100644 index 0000000000..40c399c034 --- /dev/null +++ b/e2e-tests/smart-update3/conf/secrets.yml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: Secret +metadata: + name: my-cluster-secrets +type: Opaque +data: + root: cm9vdF9wYXNzd29yZA== + xtrabackup: YmFja3VwX3Bhc3N3b3Jk + monitor: bW9uaXRvcl9wYXNzd29yZA== + proxyadmin: YWRtaW5fcGFzc3dvcmQ= + pmmservertoken: cG1tc2VydmVyX3Bhc3N3b3Jk + operator: b3BlcmF0b3JhZG1pbg== + replication: 
cmVwbF9wYXNzd29yZA== +--- +apiVersion: v1 +kind: Secret +metadata: + name: some-name-ssl +type: kubernetes.io/tls +data: + ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURFakNDQWZxZ0F3SUJBZ0lSQUtvY3pHb0VMcTI3aEVCY1FwZjBHNm93RFFZSktvWklodmNOQVFFTEJRQXcKRWpFUU1BNEdBMVVFQ2hNSFVtOXZkQ0JEUVRBZ0Z3MHlNVEEwTWpNd09URTBORFJhR0E4NU9UazVNVEl6TVRJegpOVGsxT1Zvd0VqRVFNQTRHQTFVRUNoTUhVbTl2ZENCRFFUQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQCkFEQ0NBUW9DZ2dFQkFMcENHbFlDbGNLZko1cENxK29udzByVnFBV3N4RGh0MnhzWnpTcHROc0duVjVQTG9xSVMKYWNzWloxbWtDSFRtNG9qYTdCSFk3ZjhPbnl1R0RySFdhZk55WkhLMCtqcTFOMm1SZ2p6azFQOVlxRGVwOURZRwo2WW1EWU43TktBVVlQdFlRUmJ1NFZOVXdTZHI4WTFpQytsSkJNODNkZzk2MXY1eDBZMGVSeERiazVuOFdOMjdTCjBHSmhLSGVvRzlacFBvVFlhNmw3V1FZalBZcVZLcmJkSGxZUUxDTE43MHlhOUF4ckNhQ3h1emh3R2VlREM1S2IKNFljWmloRlBueUxoYlBla2lSKzZIUE56dGdCV0NlVWhGdDFZZHliNXpFS3VSQm9jT0ttMWgwclFuWUV3eVRlQwpGSDY2WUhWQUMwTGFOeXhyUnlOM3hScVJPQWdQMWpNM0RKa0NBd0VBQWFOaE1GOHdEZ1lEVlIwUEFRSC9CQVFECkFnSUVNQjBHQTFVZEpRUVdNQlFHQ0NzR0FRVUZCd01CQmdnckJnRUZCUWNEQWpBUEJnTlZIUk1CQWY4RUJUQUQKQVFIL01CMEdBMVVkRGdRV0JCU1luUGJlODNDRFA0VVZHeEJMOWtsaTE4MGp3REFOQmdrcWhraUc5dzBCQVFzRgpBQU9DQVFFQWRyZlFVMXViUHNhWFRzQXRad2l1eG9FZ2JTalIvdUNUSDdxbU1Vd1duWkUxRzl6NUVEMGg1empaCm1TdTdjVkltbVlLeTBBaVZpOTIwWmx2ZXJ2MWNtVmNUMkFxTzBJRjJybHZNVTZmZ0UwM05vR2phazdzWmFHeVgKUDFQaXlpKytLbHNHektXcnFMWXd3Q0RWdnVWYUVpSnJ1QlRMcjE1MWNXUVV6Z2J6b2NiMnFBYjIyNFpEeTRDaQpFM2JaMDRmeVQxdXRLZ0RCZ1EzNXRsQ0FEbTRPTitZVEZ5QWIvMitjbk5pSmFqNVBBWjhNMUZwZkNCTUJLdEc5CmZ6VXA2VVBiMDFnaStzRUZFaGpycWpPd3dRMUU3TG1yMyszeWpFY2hiRTY4aTRrcm9uUWRZUEUyL083VEJBVlgKNE9yYXF6ekt0K0xBM2NkOWg3czc2amZ0SzRIdkhnPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURRakNDQWlxZ0F3SUJBZ0lRYjVhemI4Q2lwNWdyMmJ4RXNoc0xyREFOQmdrcWhraUc5dzBCQVFzRkFEQVMKTVJBd0RnWURWUVFLRXdkU2IyOTBJRU5CTUNBWERUSXhNRFF5TXpBNU1UUTBORm9ZRHprNU9Ua3hNak14TWpNMQpPVFU1V2pBT01Rd3dDZ1lEVlFRS0V3TlFXRU13Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLCkFvSUJBUURGUmkzcURHemkvb2dyUU12UnBSK0pJMmQ3RSt3UDVjYjRHVFE2UVRBUis5ZWNpK1djcDJWM2NYK2MKcnFkVmhVT0JFT3NNU2k0R3o5NklYeWFCZzBPeVVLeWhXbTBDMEhrWkZqaDFCRGFlTGd1NzlETHBzVThVU3lmQgplQ0picmZ3dGFPdmtQaUloWnVFNFN6dXhlZzduMzA5ZHVaVldOSjVFOHAvSGZjN3NWSDVacnh0MlU0RnBZYytJClk3ZVJaMnhKWklXREExNkdWRDdqRDRESUdsd3NNQUxEaElpMXdGOWZrNTM5bUdQYTRaZkhpa1E1R0dLT29RV3IKaVFUdlF0S3hkRmdnYzJmOUhZYlMzSEFHTkE4VFFmMW9weXh2cHU5ZHh0VmpWVlhBeUlRVmFkdjl3ajBNcEppMwp4NFptYmRKUzdrR3VINVZaYXdsNjZWZ3ZCMFpoQWdNQkFBR2pnWlV3Z1pJd0RnWURWUjBQQVFIL0JBUURBZ1dnCk1CMEdBMVVkSlFRV01CUUdDQ3NHQVFVRkJ3TUJCZ2dyQmdFRkJRY0RBakFNQmdOVkhSTUJBZjhFQWpBQU1GTUcKQTFVZEVRUk1NRXFDRFhOdmJXVXRibUZ0WlMxd2VHT0NFbk52YldVdGJtRnRaUzF3Y205NGVYTnhiSUlQS2k1egpiMjFsTFc1aGJXVXRjSGhqZ2hRcUxuTnZiV1V0Ym1GdFpTMXdjbTk0ZVhOeGJEQU5CZ2txaGtpRzl3MEJBUXNGCkFBT0NBUUVBaVU1ZFFpbmtXazg2Q2xZRm5CbysyRUV3YWFLaWtNY014bnN2cFBtbGhJV0JQamh6ZnJpMG1Pa0QKSUJOUGVpUlNtTUJNMWJONHMyWUp3U3VueVZWMnNQVm5HRVhjVGhhNmlGQ0FYQSs4cGJVd3BEc0NMNTQzVjJheQp5R1FPTkNSNEs0NEV5eEljT1lTcFN1NVJvbmVkd1BEWXN0N1BzRHQvV3ZDWlVaakVyZC9PbTdrVUVLZklwTjdICnJ5Vmcvdi9ocG1JTWZTT0c2VDlzMW1sZ2U4THkvNk56c1E0dTJxaE5CZHJIWFhhdjRJcTdGd2RoaHV2dEdJZWgKWXJtaWpCalc5WVVOaVllVjV2cVdQZFdQUjl0cVNhMTlrcDg5Um9xalNVT05Bb1FvZ1JHcHAyWkw3UE9qaGw4ZAp2YTFLRksyWFhYYnJVRnVsbWIxem5LaFY1QUNialE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBeFVZdDZneHM0djZJSzBETDBhVWZpU05uZXhQc0QrWEcrQmswT2tFd0VmdlhuSXZsCm5LZGxkM0Yvbks2blZZVkRnUkRyREVvdUJzL2VpRjhtZ1lORHNsQ3NvVnB0QXRCNUdSWTRkUVEybmk0THUvUXkKNmJGUEZFc253WGdpVzYzOExXanI1RDRpSVdiaE9FczdzWG9PNTk5UFhibVZWalNlUlBLZngzM083RlIrV2E4YgpkbE9CYVdIUGlHTzNrV2RzU1dTRmd3TmVobFErNHcrQXlCcGNMREFDdzRTSXRjQmZYNU9kL1poajJ1R1h4NHBFCk9SaGlqcUVGcTRrRTcwTFNzWFJZSUhObi9SMkcwdHh3QmpRUEUwSDlhS2NzYjZidlhjYlZZMVZWd01pRUZXbmIKL2NJOURLU1l0OGVHWm0zU1V1NUJyaCtWV1dzSmV1bFlMd2RHWVFJREFRQUJBb0lCQUJQL3pWL1E2cWtJak4xMQpzRFFNdldtMXdTUGxIU1Z2Y2dCczdiS0tNVWxoY3JaWUtHcjlQWGdpZFlGNDQxWDBNS2ZCQTFNM29mcm16L1NiClVOZ0lhckNGSlRzbVk2N0pXNzJEVnkzbHczTzl6MHE2c3Y2bUR4SkxuRGdzNmE3SnNLc1oyL0JEckVvZWwrNlIKRWZnb0c2SytMTm1mbXRnbWRYVWY4N3JSc2t5YkJuOHpHanVQbmZHR2FkeWR1SUZVRWlzU2E1TFRoVHFVWjNrZwp5ZDJQUVdYcUkvejNXRGZzUW01dnN4TnBWYk5OaHBhaHQySE5kQnljNnJGbVg4TFcvU3N5S1lLQm9xK080c1NuCitBS3hEUUVTZmFveG4xdGZObGxXYnZjaFRVSTdyV1BSWEVxdFlhVmZsT3FFV1E1RjJPRVRDYkcxSnBPZDBXNnAKRnBnRFN0RUNnWUVBOXdYNFRHVTZaaGFFb3hVRHdOQ0RMZU56cjdabzBVNFBnTTd6U1NNdmpVa2NRWllSV0RKVQp6MjVLdmo0N3dId0x5QnZxMnY2REJDb0xQdnlSbEpTaUtnVzUzYzJtcUlIRTM2VzR0RU5uNk9qTTZWMk5pclVvCldaakJYTGo0d2s3Z21uM1RaaFk5clZSVXBuZTdmOXp6Vkp0RHFTK0cyLzI0SGFvZlpjakFXcThDZ1lFQXpIRmwKL2lCSSt6ak5vWGZjVEh1cTdkTUpzNGtoM3RNWnFheldTS2xYOTliRDJGU2ZpQldDNWY1MURLSEJGWTFCb0FvKwpQRDVBRUhzTUs0dWhqQXl3WVJLMWhNY3lSUSsxQ2EzaHZoL1k4LzVsdHUwU3ltRUE2RnZnN3FvTEI0U3NSKzk4CmtFSVVDZFdNYmNiYlZyd1N6Rnk4aFlJdm1idXo5YWs4YWFZcWMrOENnWUJwSExvMlQ5RFV4Zm8wcHVtTTcyMFMKWnJuQWFEOGI2VHFCbGc1QjdkMzdJY1FMU0g4aTUyL2RRRkkvdDUyWFgzRm4rakVxZEtodGFqS1UzOFpXMGhSYgpDMHEyemg1L2o1Q2xsaEFOYksxWGwxQXljU09jaFUraEtIWUhMWkowcERuQW81QUQzYXpyQjNwcHg0TWFlbnQwCllna2RnZUxCMDZtM3ZiOVVnV01RY3dLQmdRQ0RWL2NtcEtjK2Z1ZmM3SnhqcHEvUnl0dFl0S2xGOW1sdVVOUDYKS3BHUmNEQ3lNdXVyQ2VibXkrdDNDaFI2UEI0Rks0K0FOSjREK0ltQkVGdmhGZVhhOTZJV2c4TVFDMTlMc0tKdQoreGJ1Nm1sK2RDQzJWRXU0L0E0dVJxQi9YQVV5MGZFODNMYnkrbmNWcjhRS25SbVdvWjJjU0Y4OGJSTFlxTGxmClJwblBrUUtCZ1FEZGJNRW1RQXY5bERNVG9Xa1VrLzk4RDlmVWI2
WXFQWTN2YW8zSmpweU9waTZkUUFPVHRKZXIKMnVSRDBjQ3MxbWVicStXS0VsMU1BSklEQWhvM2FBcGU0cStxbDNMc1ArTDNhTDBTMEZUUTVFNkpIOEhkMEdVQwpxaXZOdXFTdUtnR09VR2x0cGRSakYrbi9hUGUzYlFQRjVGSGEzeUpWS1BUbFlRNGk1enlFMWc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= +--- +apiVersion: v1 +kind: Secret +metadata: + name: some-name-ssl-internal +type: kubernetes.io/tls +data: + ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURFakNDQWZxZ0F3SUJBZ0lSQU5uMFo0M1hINXRDZVh5YSs4blRnSXN3RFFZSktvWklodmNOQVFFTEJRQXcKRWpFUU1BNEdBMVVFQ2hNSFVtOXZkQ0JEUVRBZ0Z3MHlNVEEwTWpNd09URTBORFZhR0E4NU9UazVNVEl6TVRJegpOVGsxT1Zvd0VqRVFNQTRHQTFVRUNoTUhVbTl2ZENCRFFUQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQCkFEQ0NBUW9DZ2dFQkFLNTFmRkVvY0ZGaEkvU095VUxmODBPZW42cCtBTkc5dGo4RzVoQ3lkbmxsQjhYRzVrNWUKd3I2Nkh1V2FFYlhpeU5PYlUvUG84ZXJlSUN4NkRFcHErNzNlbE1UVXdaYm9FK2RjSGYxRDhuWko5QVp0QTdqVwo1SXBINjdqcUZzOXBhMnZ3S2UwbU9hSXE2dFg0OUVxRGVuS1VXVlU1enNNUDFPcEE5UFIxVXB1aTg1MGxUeUJ1Ck0yTysreFJsNFQrL1Vod1JySE9lY3VEcm5QeFh2WFZ2NXVJWXlPeGVSNjc0b3BPdHF4R1ZKcVBBZWF5Mkpla0gKdFNuclBYS09nYklxZ2pKaVg5VHRLM0twQ2E5ZVVEYkRrQTB5MjV1Q2ZiejRuRnVzZjMzOGhoUUhab212ZFJjcwoxS0t4RTFOdUVTZTlNcVIxYXk1dUhhMjZwK2xqdHUrQk1na0NBd0VBQWFOaE1GOHdEZ1lEVlIwUEFRSC9CQVFECkFnSUVNQjBHQTFVZEpRUVdNQlFHQ0NzR0FRVUZCd01CQmdnckJnRUZCUWNEQWpBUEJnTlZIUk1CQWY4RUJUQUQKQVFIL01CMEdBMVVkRGdRV0JCVGI2ckNpbjFHdkY2WkNSL0VHZ0thZkNjRE1vakFOQmdrcWhraUc5dzBCQVFzRgpBQU9DQVFFQWVzdXJZZWR0eDR3a2xpOHZMMmVkOE5lYnVrNERpL3pkc3hCeU5ZVXN0WHdvVjQ0NkNLYWtrajNHCmIrZ3NvVWRhWkwvUU44bHBYRmp2V09RanR6VXdVUVpZb1hQK2oxbWpoc0dNdEs4b1hUUno4ZEliT1l3S2EvYloKdmQ1cGw4dkp4bUZxU3pZV1hod2tEMEJKRTJ0SE0yK3NsOHZTTXErekN6bnQ2RVRhOUJSQUdQK09uQTJrRDBpZwpnalJETEpmTnIxdEgwUGcyUzZwcm9ZdUZkcjhUTjM5S0pSQjVtQ3BLbzBxbTd2TGJ5aFh4NkVRNGZtVDlLZEFFCjA0MmhWUng1SVpHS0EwbVVXSndia0xvcWtPRi82RGtrdGFUeTJzKzJSN2xzRGFtcmFjc2p3OXRLQzEzakp0U2QKMy81V1BkT1VPQXhBTkt5MmNqYTUxTkdDZU44bVVBPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURGekNDQWYrZ0F3SUJBZ0lSQUk1V0JFVGNsL2RiY0VLZm5FUGVodzh3RFFZSktvWklodmNOQVFFTEJRQXcKRWpFUU1BNEdBMVVFQ2hNSFVtOXZkQ0JEUVRBZ0Z3MHlNVEEwTWpNd09URTBORFZhR0E4NU9UazVNVEl6TVRJegpOVGsxT1Zvd0RqRU1NQW9HQTFVRUNoTURVRmhETUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCCkNnS0NBUUVBdjFHRGJQM1lzYS9uN2xWRmxOcUkyWnJTRWR6UlYwRTl0eVdmRC94RHZvTTVPOVIwQ2YxdDdLZUoKMm5ZajMyNEZvM1NCTVBLSWFycWY3eU9hUFpFU0ZRbVRIaklSNUFkOTY3ajRZVzluc2pqcnFjRnRCVkJLaFNoZApQaS95bElva25tbmIyRnZEdDMxWmsvWXRFM3p2MUl0SnJFSEFTNG1xSU5taUJYeXNKZFZURGZ0dEI5cDhBSGlTCk55TW9YWXVTQmpWNjQ5YytQb3dQWUh2UTRGRTBOYUNTTmkzRnFnSFFHR0xtTkM2aWpBZ1hPOTFsckxaRk5MbGoKaHRKTzRHQm9aRTJ3MFFEWGpDRzZJektvRUdUY2J0NUY4MnJVRTM1SGRKQk52MGkrU1pYS3lWVlVaNXpLUTY5OApHaHJmQkQ2emFTWkZyVnpkYWdnV0N1VVpVcS85cHdJREFRQUJvMm93YURBT0JnTlZIUThCQWY4RUJBTUNCYUF3CkhRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSEF3RUdDQ3NHQVFVRkJ3TUNNQXdHQTFVZEV3RUIvd1FDTUFBd0tRWUQKVlIwUkJDSXdJSUlQS2k1emIyMWxMVzVoYldVdGNIaGpnZzF6YjIxbExXNWhiV1V0Y0hoak1BMEdDU3FHU0liMwpEUUVCQ3dVQUE0SUJBUUJUQW4vVlUrTlJnSlRKQXdjTDYxZ2R1a3VsRjJ4aEdWN00yb2t4RGJNVFBJd3YzS0FQCkV0OXR1NGhJTTF5S1VYcHM5bVZ4ZDgzaXd1TTZsSURGQW0vM3RsL1pGMlRCVmU3UHpZUDVuMGN3d05pNjhUZTQKNnlhc2hUdTBrbjBOeFYyazF5VG05Y3dVOXNoRGZTTFZwV0VlZWxOQ2ZKcEVWNGdhWHVkaWRMdnZ1ZVgySG5IVwo1MFFyT2lnZExRczlqZ3VyaFJEZlArMEs4NkJyb2JIcU1rbVphR2hJRUYwaS9IcCt4VW1xcytBVG9oMlIzbkwrClVVRXEreGFoT3A0MHJtdkVLRlg5U3Z2bUIyQ2Rkd2NFWjNUbnhRYUR0N2NYNlpJUU9uQmxQK1Y3YjdaWDFNeTAKc3MrbmhDWU9nZWVrUWZqQktBb092SllnZEZjQ2tkeHkvd043Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBdjFHRGJQM1lzYS9uN2xWRmxOcUkyWnJTRWR6UlYwRTl0eVdmRC94RHZvTTVPOVIwCkNmMXQ3S2VKMm5ZajMyNEZvM1NCTVBLSWFycWY3eU9hUFpFU0ZRbVRIaklSNUFkOTY3ajRZVzluc2pqcnFjRnQKQlZCS2hTaGRQaS95bElva25tbmIyRnZEdDMxWmsvWXRFM3p2MUl0SnJFSEFTNG1xSU5taUJYeXNKZFZURGZ0dApCOXA4QUhpU055TW9YWXVTQmpWNjQ5YytQb3dQWUh2UTRGRTBOYUNTTmkzRnFnSFFHR0xtTkM2aWpBZ1hPOTFsCnJMWkZOTGxqaHRKTzRHQm9aRTJ3MFFEWGpDRzZJektvRUdUY2J0NUY4MnJVRTM1SGRKQk52MGkrU1pYS3lWVlUKWjV6S1E2OThHaHJmQkQ2emFTWkZyVnpkYWdnV0N1VVpVcS85cHdJREFRQUJBb0lCQUgyOEZaTEkxZVFaUm1VMQpOM0NYbXY3Z0tCVkRPWFdOMUFDTXdoZmdZNmxPVm93Q21XeURXUDRDTWJBWW1vVFpxdXo4d1hjb3ZnemVzQU9iCk0yQW8rbWhIRFovM3FoaGg4QmkrVE1iRVQ3TXFnMzdpTjBNWnZJLzhzaU1Qb2U2TFIzdXcrS0lkMU9lL2d1S2kKRklmTlFWVS9ZV0RMS1pTN1djc1RITTljVzByVTMxOTdYeWpndHJ6VE0vU21VS3ljSjBtOWRvMHhvbHJ3VUJzRQpWQ2pCMHd0RWkvVCtCSWY0RTJYSlZyNDdlajJBNTlDeGs0WHp3NjBsaG8vb2gvQkVSL0J2QXhZNTFheTlrSEM2Cnl3VWxOV1hkTGVGS2U1Rnk2cmRvZVJGY0VNdVZ6a2QzV0E0Rm5hRmlOWGhLeStSNlNGMXdiY1BnTlc0a3dydUcKbEx4dklka0NnWUVBNzNkQ001WGd5K0VEZDhhWDBKazN2UEdVeXcrOGgzZEt1ckpZQ1drT1QzMVhkdUFhcEhINApFOXNycXlzeDJWbWxFdVltajRGd0FsdUJjY05Ba2wvZXYyaFAyUThHUkZHaGRqU0I4bXVjMVVUYU9tc2x4WXdUCkNyZUZveWtoN2QwTDBhNkovcnVwVk5zYmdRbmx1VzVnb1JIY25SVXM2TGxadngrSENxUDI1aHNDZ1lFQXpJYzIKU1pHazNMV0pHdVJvbjdpeVhIUlBlSUhBN25lWUZ5QW1GQXl5Zjc0REF2NWh4SFMvd2hhcEkxS0lmVHNYNlNnaQp3ck5MTWxOY0hvd0U1dUd2SzJuOXoxdWZqZVRFbjFzeVZLZTQzTWtKRHAxQk5NSlVhQmg2WC9oN3BDL3lFTUMvCjloaGdvbFN2ZnE3WlFDVms3ejdHK1VvNWJCbzR3YTJRTDB1YzcyVUNnWUVBMGtWck9UYlRWTk01bzFRYi9NeDkKZUFpOXVlMFFnL2RKQkZVVUh2ZEQzS0xZU1ljUmZmS2hmbllHaTEvN2ZycEx6Q09TR3BMekV1N2M1Rk1xQVIzegp6eWsrYS8vWVplYzBHMEtTRkpkUCtLbGo0c1l4UFp6NUg0RDA5TWRxaHA2Q0FWWDgwRlJpcFNOY3JGdFBnQnNlCitIQmh3d0ZVRk9xa2xzR05aOWVBTnA4Q2dZRUF1VVVLTWg1U2Z2T2EzTmRteHJoaUtVbmE2MGh1WWhYSG1ic3YKZ3gzMVc2M2R3SXA2T2FHZ1NzcUlNRTBGQXB2VER3dWlZVWhVcDZQSFlJeHByRk5uZ0NZbEdmN2ZTNE9kY05VRgpoSlhoNlczYVhIaXFwb1lhTzZsZ3dTcXZwWTBnODNnRzY0QXRtUjZwVWxKRXpjeVIzLzYzOHYzL2dpTkdvbnI4Cmp6c3BUOWtDZ1lBeEZMaU9LK3I0d0RYSlBlWnRHSGVZY1JHYnJz
SFdEdlRzMEFtQzllWklhWW5NaWcxMVJBSFgKYkk1dzFYcUsvUVNFcEtoMEhXbzBncmVLWk16MXpBaUx0UU5YVFBFbFZQaURZTWJwNzlsQisvcm9raGp4SEhVVQpUR09BakUvWVR2S3JwZkY1WXUwcjV1elJDdjNpek9lK2RoNWlyemU2dEQ1TVRsOGp3ZlFjZlE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= diff --git a/e2e-tests/smart-update3/conf/smart-update-pmm2.yml b/e2e-tests/smart-update3/conf/smart-update-pmm2.yml deleted file mode 100644 index e94b8404b9..0000000000 --- a/e2e-tests/smart-update3/conf/smart-update-pmm2.yml +++ /dev/null @@ -1,80 +0,0 @@ -apiVersion: pxc.percona.com/v1 -kind: PerconaXtraDBCluster -metadata: - name: smart-update - finalizers: - - percona.com/delete-pxc-pods-in-order -spec: - crVersion: 9.9.9 - updateStrategy: SmartUpdate - upgradeOptions: - versionServiceEndpoint: https://127.0.0.1/versions - apply: recommended - schedule: "0 4 * * *" - secretsName: my-cluster-secrets - pause: false - pxc: - size: 3 - image: -pxc - resources: - requests: - memory: 2Gi - cpu: "1" - limits: - memory: 2Gi - cpu: "1" - volumeSpec: - persistentVolumeClaim: - resources: - requests: - storage: 6Gi - affinity: - antiAffinityTopologyKey: "kubernetes.io/hostname" - proxysql: - enabled: false - size: 2 - image: -proxysql - resources: - requests: - memory: 1Gi - cpu: "1" - limits: - memory: 1Gi - cpu: "1" - volumeSpec: - persistentVolumeClaim: - resources: - requests: - storage: 2Gi - affinity: - antiAffinityTopologyKey: "kubernetes.io/hostname" - haproxy: - enabled: true - size: 2 - image: -haproxy - resources: - requests: - memory: 1G - cpu: 600m - affinity: - antiAffinityTopologyKey: "kubernetes.io/hostname" - podDisruptionBudget: - maxUnavailable: 1 - gracePeriod: 30 - pmm: - enabled: false - image: percona/pmm-client:2.44.0 - serverHost: monitoring-service - serverUser: pmm - backup: - image: -backup - serviceAccountName: percona-xtradb-cluster-operator - storages: - pvc: - type: filesystem - volume: - persistentVolumeClaim: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 2Gi diff --git 
a/e2e-tests/smart-update3/run b/e2e-tests/smart-update3/run index 9ce29d9d50..6966582360 100755 --- a/e2e-tests/smart-update3/run +++ b/e2e-tests/smart-update3/run @@ -97,80 +97,26 @@ function main() { create_infra "${namespace}" deploy_version_service deploy_cert_manager + deploy_helm "${namespace}" kubectl_bin patch crd perconaxtradbclusters.pxc.percona.com --type='json' -p '[{"op":"add","path":"/spec/versions/-", "value":{"name": "v9-9-9","schema": {"openAPIV3Schema": {"properties": {"spec": {"type": "object","x-kubernetes-preserve-unknown-fields": true},"status": {"type": "object", "x-kubernetes-preserve-unknown-fields": true}}, "type": "object" }}, "served": true, "storage": false, "subresources": { "status": {}}}}]' kubectl_bin ${OPERATOR_NS:+-n $OPERATOR_NS} set env deploy/percona-xtradb-cluster-operator "PERCONA_VS_FALLBACK_URI=http://version-service.${namespace}.svc.cluster.local:11000" - ################################################## - desc 'PMM2 cluster update with the recommended image by version service' - - # Prepare cluster config - cp -f "${test_dir}/conf/${CLUSTER}-pmm2.yml" "${tmp_dir}/${CLUSTER}-pmm2.yml" - yq -i eval ".spec.initContainer.image = \"${IMAGE}\"" "${tmp_dir}/${CLUSTER}-pmm2.yml" - - spinup_pxc "${CLUSTER}" "${tmp_dir}/${CLUSTER}-pmm2.yml" - - - # Get actual PXC version from running cluster and add to version service - wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" - ACTUAL_PXC_VERSION=$(kubectl_bin get pxc "${CLUSTER}" -o jsonpath='{.status.pxc.version}') - desc "Detected PXC version: ${ACTUAL_PXC_VERSION}" - add_pxc_version_to_vs "${ACTUAL_PXC_VERSION}" "${IMAGE_PXC}" - - # Enable PMM2 with older version - kubectl_bin patch pxc/"${CLUSTER}" --type=merge -p '{"spec":{"pmm":{"enabled":true,"image":"percona/pmm-client:2.44.0","serverHost":"monitoring-service"}}}' - wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" - - # Get initial PMM2 image - initial_pmm2_image=$(kubectl_bin get pod 
"${CLUSTER}-pxc-0" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}') - desc "Initial PMM2 image: ${initial_pmm2_image}" - - # Get recommended PMM2 image from version service - pmm2_recommended_image=$(kubectl_bin exec -ti "$(get_operator_pod)" ${OPERATOR_NS:+-n $OPERATOR_NS} -- curl -s "${VS_URL}.${namespace}.svc.cluster.local:${VS_PORT}/versions/v1/pxc-operator/9.9.9" | jq -r '.versions[].matrix.pmm | to_entries[] | select(.value.imagePath) | select(.value.imagePath | contains("pmm-client:2")) | select(.value.status == "recommended") | .value.imagePath') - desc "Target PMM2 recommended image: ${pmm2_recommended_image}" - - # Compare images and exit if they are the same - if [ "${initial_pmm2_image}" == "${pmm2_recommended_image}" ]; then - desc "PMM2 images are the same (${initial_pmm2_image}), skipping upgrade test" - exit 1 - fi - - # Update cluster to use version service with short schedule - kubectl_bin patch pxc/"${CLUSTER}" --type=merge -p '{"spec":{"upgradeOptions":{"versionServiceEndpoint":"'${VS_ENDPOINT}'","apply":"recommended","schedule": "* * * * *"}}}' - - # Wait for PMM2 update - wait_pmm_update "${CLUSTER}" "${CLUSTER_SIZE}" "${pmm2_recommended_image}" "2" || exit 1 - - wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" - - # Verify PMM2 updated - for i in $(seq 0 $((CLUSTER_SIZE - 1))); do - actual_pmm_image=$(kubectl_bin get pod "${CLUSTER}-pxc-${i}" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}') - if [[ "${actual_pmm_image}" != *"${pmm2_recommended_image}"* ]]; then - echo "ERROR: PMM2 image not updated on ${CLUSTER}-pxc-${i}. 
Expected: ${pmm2_recommended_image}, Got: ${actual_pmm_image}" - exit 1 - fi - done - desc "PMM2 successfully updated to ${pmm2_recommended_image}" - - kubectl_bin delete -f "${tmp_dir}/${CLUSTER}-pmm2.yml" - kubectl_bin delete pvc --all - ################################################## desc 'PMM3 cluster update with the recommended image by version service' - desc "Updating secret for PMM3 (pmmserver -> pmmservertoken)" - # Get current pmmserver value from my-cluster-secrets - pmm_password=$(kubectl_bin get secret my-cluster-secrets -o jsonpath='{.data.pmmserver}') - - # Patch my-cluster-secrets: remove pmmserver, add pmmservertoken - kubectl_bin patch secret my-cluster-secrets --type=json \ - -p '[{"op":"remove","path":"/data/pmmserver"},{"op":"add","path":"/data/pmmservertoken","value":"'${pmm_password}'"}]' + desc 'install PMM3 Server' + deploy_pmm3_server # Prepare PMM3 cluster config cp -f "${test_dir}/conf/${CLUSTER}-pmm3.yml" "${tmp_dir}/${CLUSTER}-pmm3.yml" yq -i eval ".spec.initContainer.image = \"${IMAGE}\"" "${tmp_dir}/${CLUSTER}-pmm3.yml" - spinup_pxc "${CLUSTER}" "${tmp_dir}/${CLUSTER}-pmm3.yml" + + spinup_pxc "${CLUSTER}" "${tmp_dir}/${CLUSTER}-pmm3.yml" 3 10 "${test_dir}/conf/secrets.yml" + + desc 'add PMM3 token to secret' + TOKEN=$(get_pmm_server_token "operator") + kubectl_bin patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmservertoken": "'"$TOKEN"'"}}' # Get actual PXC version from running cluster and add to version service wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" @@ -186,33 +132,43 @@ function main() { pmm3_recommended_image=$(kubectl_bin exec -ti "$(get_operator_pod)" ${OPERATOR_NS:+-n $OPERATOR_NS} -- curl -s "${VS_URL}.${namespace}.svc.cluster.local:${VS_PORT}/versions/v1/pxc-operator/9.9.9" | jq -r '.versions[].matrix.pmm | to_entries[] | select(.value.imagePath) | select(.value.imagePath | contains("pmm-client:3")) | select(.value.status == "recommended") | .value.imagePath') desc 
"Target PMM3 recommended image: ${pmm3_recommended_image}" + # Compare images and skip if they are the same if [ "${initial_pmm3_image}" == "${pmm3_recommended_image}" ]; then - desc "PMM3 images are the same (${initial_pmm3_image}), skipping upgrade test" - exit 1 + desc "PMM3 images are the same (${initial_pmm3_image}), skipping PMM3 upgrade test" + exit 1 fi + # Update cluster to use version service + kubectl_bin patch pxc/"${CLUSTER}" --type=merge -p '{"spec":{"upgradeOptions":{"versionServiceEndpoint":"'${VS_ENDPOINT}'","apply":"recommended","schedule": "* * * * *"}}}' - # Update cluster to use version service - kubectl_bin patch pxc/"${CLUSTER}" --type=merge -p '{"spec":{"upgradeOptions":{"versionServiceEndpoint":"'${VS_ENDPOINT}'","apply":"recommended","schedule": "* * * * *"}}}' + # Wait for PMM3 update + wait_pmm_update "${CLUSTER}" "${CLUSTER_SIZE}" "${pmm3_recommended_image}" "3" || exit 1 - # Wait for PMM3 update - wait_pmm_update "${CLUSTER}" "${CLUSTER_SIZE}" "${pmm3_recommended_image}" "3" || exit 1 + wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" - wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" + # Verify PMM3 updated + for i in $(seq 0 $((CLUSTER_SIZE - 1))); do + actual_pmm_image=$(kubectl_bin get pod "${CLUSTER}-pxc-${i}" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}') + if [[ "${actual_pmm_image}" != *"${pmm3_recommended_image}"* ]]; then + echo "ERROR: PMM3 image not updated on ${CLUSTER}-pxc-${i}. 
Expected: ${pmm3_recommended_image}, Got: ${actual_pmm_image}" + exit 1 + fi + done + desc "PMM3 successfully updated to ${pmm3_recommended_image}" + + # Verify PMM3 metrics are being collected + desc 'verify PMM3 metrics are being collected' + sleep 60 + TOKEN=$(getSecretData "my-cluster-secrets" "pmmservertoken") + get_metric_values_pmm3 haproxy_backend_status $namespace-${CLUSTER}-haproxy-0 $TOKEN + get_metric_values_pmm3 haproxy_backend_active_servers $namespace-${CLUSTER}-haproxy-0 $TOKEN + desc "PMM3 metrics verified successfully" - # Verify PMM3 updated - for i in $(seq 0 $((CLUSTER_SIZE - 1))); do - actual_pmm_image=$(kubectl_bin get pod "${CLUSTER}-pxc-${i}" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}') - if [[ "${actual_pmm_image}" != *"${pmm3_recommended_image}"* ]]; then - echo "ERROR: PMM3 image not updated on ${CLUSTER}-pxc-${i}. Expected: ${pmm3_recommended_image}, Got: ${actual_pmm_image}" - exit 1 - fi - done - desc "PMM3 successfully updated to ${pmm3_recommended_image}" kubectl_bin delete -f "${tmp_dir}/${CLUSTER}-pmm3.yml" kubectl_bin delete pvc --all desc 'cleanup' + helm uninstall monitoring kubectl_bin delete -f "${test_dir}/conf/vs.yml" destroy "${namespace}" desc "test passed" From 1421029a7a333eb2e60eb6d5ff9ecf07166c3f0a Mon Sep 17 00:00:00 2001 From: Natalia Marukovich Date: Fri, 12 Dec 2025 13:51:17 +0100 Subject: [PATCH 12/21] add retries --- e2e-tests/smart-update3/run | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/e2e-tests/smart-update3/run b/e2e-tests/smart-update3/run index 6966582360..6404777cfb 100755 --- a/e2e-tests/smart-update3/run +++ b/e2e-tests/smart-update3/run @@ -1,6 +1,5 @@ #!/bin/bash # CASES: -# - Update PMM2 to recommended version via version service # - Update PMM3 to recommended version via version service set -o errexit @@ -54,7 +53,7 @@ function wait_pmm_update { local cluster_size=${2} local expected_image=${3} local pmm_version=${4} # e.g., "2" or "3" - local 
max_retry=${5:-120} # Default 10 minutes + local max_retry=${5:-240} # Default 20 minutes desc "Waiting for PMM${pmm_version} containers to update to ${expected_image}..." set +x From eb0720791c224da384b524ae63e870671f5d749a Mon Sep 17 00:00:00 2001 From: Natalia Marukovich Date: Sat, 13 Dec 2025 17:57:44 +0100 Subject: [PATCH 13/21] add debug info --- e2e-tests/smart-update3/run | 94 +++++++++++++++++++++++++++++++++---- 1 file changed, 85 insertions(+), 9 deletions(-) diff --git a/e2e-tests/smart-update3/run b/e2e-tests/smart-update3/run index 6404777cfb..e6546b6e09 100755 --- a/e2e-tests/smart-update3/run +++ b/e2e-tests/smart-update3/run @@ -48,35 +48,104 @@ function add_pxc_version_to_vs { sleep 10 } +#function wait_pmm_update { +# local cluster=${1} +# local cluster_size=${2} +# local expected_image=${3} +# local pmm_version=${4} # e.g., "2" or "3" +# local max_retry=${5:-240} # Default 20 minutes +# +# desc "Waiting for PMM${pmm_version} containers to update to ${expected_image}..." +# set +x +# local retry=0 +# echo -n "Waiting for PMM update" +# +# until [[ $retry -ge $max_retry ]]; do +# local updated_count=0 +# for i in $(seq 0 $((cluster_size - 1))); do +# local actual_pmm_image=$(kubectl_bin get pod "${cluster}-pxc-${i}" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}' 2>/dev/null || echo "") +# +# # Only check if actual image contains expected image (handles docker.io prefix) +# if [[ "${actual_pmm_image}" == *"${expected_image}"* ]]; then +# ((updated_count += 1)) +# fi +# done +# +# if [[ ${updated_count} -eq ${cluster_size} ]]; then +# echo " Done! All ${cluster_size} pods updated." +# set -x +# return 0 +# fi +# +# echo -n "." 
+# ((retry += 1)) +# sleep 5 +# done +# +# # Timeout reached +# set -x +# echo "ERROR: Timeout waiting for PMM${pmm_version} update after $((max_retry * 5)) seconds" +# echo "Expected image: ${expected_image}" +# for i in $(seq 0 $((cluster_size - 1))); do +# local actual=$(kubectl_bin get pod "${cluster}-pxc-${i}" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}' 2>/dev/null || echo "none") +# echo " ${cluster}-pxc-${i}: ${actual}" +# done +# return 1 +#} + function wait_pmm_update { local cluster=${1} local cluster_size=${2} local expected_image=${3} - local pmm_version=${4} # e.g., "2" or "3" - local max_retry=${5:-240} # Default 20 minutes + local pmm_version=${4} # e.g., "2" or "3" + local max_retry=${5:-240} # Default 20 minutes desc "Waiting for PMM${pmm_version} containers to update to ${expected_image}..." set +x + local retry=0 - echo -n "Waiting for PMM update" + echo "Waiting for PMM update" + echo "Expected image: ${expected_image}" + echo "Cluster size: ${cluster_size}" + echo until [[ $retry -ge $max_retry ]]; do + echo "Iteration $((retry + 1)) / ${max_retry}:" + local updated_count=0 + for i in $(seq 0 $((cluster_size - 1))); do - local actual_pmm_image=$(kubectl_bin get pod "${cluster}-pxc-${i}" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}' 2>/dev/null || echo "") - # Only check if actual image contains expected image (handles docker.io prefix) + local pod="${cluster}-pxc-${i}" + + local actual_pmm_image + actual_pmm_image=$(kubectl_bin get pod "${pod}" \ + -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}' \ + 2>/dev/null || echo "") + + if [[ -z "${actual_pmm_image}" ]]; then + echo " ${pod}: pmm-client container NOT FOUND yet" + continue + fi + if [[ "${actual_pmm_image}" == *"${expected_image}"* ]]; then + echo " ${pod}: ${actual_pmm_image}" ((updated_count += 1)) + else + echo " ${pod}: ${actual_pmm_image}" fi done if [[ ${updated_count} -eq ${cluster_size} ]]; then - 
echo " Done! All ${cluster_size} pods updated." + echo + echo "Done! All ${cluster_size} pods updated." set -x return 0 fi - echo -n "." + echo "Updated: ${updated_count}/${cluster_size}" + echo "Waiting..." + echo + ((retry += 1)) sleep 5 done @@ -85,10 +154,17 @@ function wait_pmm_update { set -x echo "ERROR: Timeout waiting for PMM${pmm_version} update after $((max_retry * 5)) seconds" echo "Expected image: ${expected_image}" + echo "Final pod states:" + for i in $(seq 0 $((cluster_size - 1))); do - local actual=$(kubectl_bin get pod "${cluster}-pxc-${i}" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}' 2>/dev/null || echo "none") - echo " ${cluster}-pxc-${i}: ${actual}" + local pod="${cluster}-pxc-${i}" + local actual + actual=$(kubectl_bin get pod "${pod}" \ + -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}' \ + 2>/dev/null || echo "none") + echo " ${pod}: ${actual}" done + return 1 } From 7c1b0db315deda970bb9172cf0aef356c947b861 Mon Sep 17 00:00:00 2001 From: Natalia Marukovich Date: Sun, 14 Dec 2025 18:28:55 +0100 Subject: [PATCH 14/21] add debug --- e2e-tests/smart-update3/run | 39 +++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/e2e-tests/smart-update3/run b/e2e-tests/smart-update3/run index e6546b6e09..9b44e5455c 100755 --- a/e2e-tests/smart-update3/run +++ b/e2e-tests/smart-update3/run @@ -165,6 +165,45 @@ function wait_pmm_update { echo " ${pod}: ${actual}" done + # ========================= + # Timeout reached — DEBUG + # ========================= + set -x + echo "ERROR: Timeout waiting for PMM${pmm_version} update after $((max_retry * 5)) seconds" + echo + echo "Expected image: ${expected_image}" + echo + echo "Final pod states (pmm-client images):" + + for i in $(seq 0 $((cluster_size - 1))); do + local pod="${cluster}-pxc-${i}" + local actual + actual=$(kubectl_bin get pod "${pod}" \ + -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}' \ + 
2>/dev/null || echo "none") + echo " ${pod}: ${actual}" + done + + echo + echo "============================" + echo "kubectl get pxc -o yaml" + echo "============================" + kubectl_bin get pxc -o yaml || true + + echo + echo "============================" + echo "Operator logs" + echo "============================" + local operator_pod + operator_pod=$(get_operator_pod) + + if [[ -n "${operator_pod}" ]]; then + echo "Operator pod: ${operator_pod}" + kubectl_bin logs "${operator_pod}" || true + else + echo "ERROR: operator pod not found" + fi + return 1 } From f99115fdd9cc9f4051a3e5642177280c3746f51f Mon Sep 17 00:00:00 2001 From: Natalia Marukovich Date: Mon, 15 Dec 2025 15:47:11 +0100 Subject: [PATCH 15/21] fix test --- e2e-tests/smart-update3/run | 118 +++--------------------------------- 1 file changed, 8 insertions(+), 110 deletions(-) diff --git a/e2e-tests/smart-update3/run b/e2e-tests/smart-update3/run index 9b44e5455c..6be448d0a9 100755 --- a/e2e-tests/smart-update3/run +++ b/e2e-tests/smart-update3/run @@ -22,7 +22,7 @@ else fi VS_URL="http://version-service" VS_PORT="11000" -VS_ENDPOINT="${VS_URL}:${VS_PORT}" +VS_ENDPOINT=${VS_URL}.${namespace}.svc.cluster.local:${VS_PORT} function deploy_version_service { desc 'install version service' @@ -48,57 +48,12 @@ function add_pxc_version_to_vs { sleep 10 } -#function wait_pmm_update { -# local cluster=${1} -# local cluster_size=${2} -# local expected_image=${3} -# local pmm_version=${4} # e.g., "2" or "3" -# local max_retry=${5:-240} # Default 20 minutes -# -# desc "Waiting for PMM${pmm_version} containers to update to ${expected_image}..." 
-# set +x -# local retry=0 -# echo -n "Waiting for PMM update" -# -# until [[ $retry -ge $max_retry ]]; do -# local updated_count=0 -# for i in $(seq 0 $((cluster_size - 1))); do -# local actual_pmm_image=$(kubectl_bin get pod "${cluster}-pxc-${i}" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}' 2>/dev/null || echo "") -# -# # Only check if actual image contains expected image (handles docker.io prefix) -# if [[ "${actual_pmm_image}" == *"${expected_image}"* ]]; then -# ((updated_count += 1)) -# fi -# done -# -# if [[ ${updated_count} -eq ${cluster_size} ]]; then -# echo " Done! All ${cluster_size} pods updated." -# set -x -# return 0 -# fi -# -# echo -n "." -# ((retry += 1)) -# sleep 5 -# done -# -# # Timeout reached -# set -x -# echo "ERROR: Timeout waiting for PMM${pmm_version} update after $((max_retry * 5)) seconds" -# echo "Expected image: ${expected_image}" -# for i in $(seq 0 $((cluster_size - 1))); do -# local actual=$(kubectl_bin get pod "${cluster}-pxc-${i}" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}' 2>/dev/null || echo "none") -# echo " ${cluster}-pxc-${i}: ${actual}" -# done -# return 1 -#} - function wait_pmm_update { local cluster=${1} local cluster_size=${2} local expected_image=${3} - local pmm_version=${4} # e.g., "2" or "3" - local max_retry=${5:-240} # Default 20 minutes + local pmm_version=${4} # e.g., "2" or "3" + local max_retry=${5:-240} # Default 20 minutes desc "Waiting for PMM${pmm_version} containers to update to ${expected_image}..." 
set +x @@ -122,30 +77,19 @@ function wait_pmm_update { -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}' \ 2>/dev/null || echo "") - if [[ -z "${actual_pmm_image}" ]]; then - echo " ${pod}: pmm-client container NOT FOUND yet" - continue - fi - + # Only check if actual image contains expected image (handles docker.io prefix) if [[ "${actual_pmm_image}" == *"${expected_image}"* ]]; then - echo " ${pod}: ${actual_pmm_image}" ((updated_count += 1)) - else - echo " ${pod}: ${actual_pmm_image}" fi done if [[ ${updated_count} -eq ${cluster_size} ]]; then - echo - echo "Done! All ${cluster_size} pods updated." + echo " Done! All ${cluster_size} pods updated." set -x return 0 fi - echo "Updated: ${updated_count}/${cluster_size}" - echo "Waiting..." - echo - + echo -n "." ((retry += 1)) sleep 5 done @@ -154,56 +98,10 @@ function wait_pmm_update { set -x echo "ERROR: Timeout waiting for PMM${pmm_version} update after $((max_retry * 5)) seconds" echo "Expected image: ${expected_image}" - echo "Final pod states:" - for i in $(seq 0 $((cluster_size - 1))); do - local pod="${cluster}-pxc-${i}" - local actual - actual=$(kubectl_bin get pod "${pod}" \ - -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}' \ - 2>/dev/null || echo "none") - echo " ${pod}: ${actual}" + local actual=$(kubectl_bin get pod "${cluster}-pxc-${i}" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}' 2>/dev/null || echo "none") + echo " ${cluster}-pxc-${i}: ${actual}" done - - # ========================= - # Timeout reached — DEBUG - # ========================= - set -x - echo "ERROR: Timeout waiting for PMM${pmm_version} update after $((max_retry * 5)) seconds" - echo - echo "Expected image: ${expected_image}" - echo - echo "Final pod states (pmm-client images):" - - for i in $(seq 0 $((cluster_size - 1))); do - local pod="${cluster}-pxc-${i}" - local actual - actual=$(kubectl_bin get pod "${pod}" \ - -o 
jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}' \ - 2>/dev/null || echo "none") - echo " ${pod}: ${actual}" - done - - echo - echo "============================" - echo "kubectl get pxc -o yaml" - echo "============================" - kubectl_bin get pxc -o yaml || true - - echo - echo "============================" - echo "Operator logs" - echo "============================" - local operator_pod - operator_pod=$(get_operator_pod) - - if [[ -n "${operator_pod}" ]]; then - echo "Operator pod: ${operator_pod}" - kubectl_bin logs "${operator_pod}" || true - else - echo "ERROR: operator pod not found" - fi - return 1 } From 335f04b9bdf6ea6c7afc7c7f91fe13ec27a78b5e Mon Sep 17 00:00:00 2001 From: Natalia Marukovich Date: Wed, 17 Dec 2025 19:11:00 +0100 Subject: [PATCH 16/21] update test --- .../conf/operator.9.9.9.pxc-operator.dep.json | 34 ++----------- .../conf/operator.9.9.9.pxc-operator.json | 36 ++++---------- .../conf/smart-update-haproxy.yml | 2 +- ...smart-update-version-service-reachable.yml | 2 +- ...art-update-version-service-unreachable.yml | 2 +- e2e-tests/smart-update1/conf/smart-update.yml | 2 +- e2e-tests/smart-update2/run | 2 - e2e-tests/smart-update3/conf/secrets.yml | 2 +- .../smart-update3/conf/smart-update-pmm3.yml | 6 +-- e2e-tests/smart-update3/run | 49 ++++++------------- 10 files changed, 39 insertions(+), 98 deletions(-) diff --git a/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.dep.json b/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.dep.json index 93d471424f..1a4604c832 100644 --- a/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.dep.json +++ b/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.dep.json @@ -1,35 +1,11 @@ { "backup": { - "8.4.0": { - "and": [ - { - ">=": [ - { - "var": "productVersion" - }, - "8.4" - ] - } - ] - }, - "8.0.14": { - "and": [ + "8.0.11": { + ">=": [ { - ">=": [ - { - "var": "productVersion" - }, - "8.0" - ] + "var": "productVersion" }, - { - "<": [ - { - 
"var": "productVersion" - }, - "8.4" - ] - } + "8.0" ] }, "2.4.20": { @@ -53,4 +29,4 @@ ] } } -} +} \ No newline at end of file diff --git a/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.json b/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.json index c0fbe7a1c5..e2aca57b78 100644 --- a/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.json +++ b/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.json @@ -5,18 +5,6 @@ "product": "pxc-operator", "matrix": { "pxc": { - "8.4.6-6.1": { - "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.4", - "image_hash": "sha256:bee70295d85ade5044ff1adde70c5047fcc88109a9fbe109befe863372a60a1c", - "status": "available", - "critical": false - }, - "8.4.5-5.1": { - "image_path": "percona/percona-xtradb-cluster:8.4.5-5.1", - "image_hash": "918c54c11c96bf61bb3f32315ef6b344b7b1d68a0457a47a3804eca3932b2b17", - "status": "available", - "critical": false - }, "8.0.43-34.1": { "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.0", "image_hash": "b19a314ae7f6c0f21a52edc4e70b250f91370f462721c0116cca4a7fef0c1acd", @@ -79,15 +67,15 @@ } }, "pmm": { - "2.44.1-1": { - "image_path": "percona/pmm-client:2.44.1-1", - "image_hash": "52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3", + "2.0.0": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-pmm", + "image_hash": "28bbb6693689a15c407c85053755334cd25d864e632ef7fed890bc85726cfb68", "status": "recommended", "critical": false }, - "3.4.1": { - "image_path": "percona/pmm-client:3.4.1", - "image_hash": "1c59d7188f8404e0294f4bfb3d2c3600107f808a023668a170a6b8036c56619b", + "1.17.1": { + "image_path": "percona/percona-xtradb-cluster-operator:1.6.0-pmm", + "image_hash": "28bbb6693689a15c407c85053755334cd25d864e632ef7fed890bc85726cfb68", "status": "recommended", "critical": false } @@ -109,12 +97,6 @@ } }, "backup": { - "8.4.0-4.1": { - "image_path": 
"perconalab/percona-xtradb-cluster-operator:main-pxc8.4-backup", - "image_hash": "sha256:40a22aa9f83d08c4a79db4a947cdab2e316d7e03535ae8874c6e6ec7bfd11938", - "status": "available", - "critical": false - }, "8.0.14": { "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup", "image_hash": "3d57e1174bac5c1c10b253437205682445c1f72c9b2b462bc8375e211c0265b5", @@ -136,5 +118,7 @@ "critical": false } } - ] -} + } + } + ] +} \ No newline at end of file diff --git a/e2e-tests/smart-update1/conf/smart-update-haproxy.yml b/e2e-tests/smart-update1/conf/smart-update-haproxy.yml index fe77639a17..5d36197559 100644 --- a/e2e-tests/smart-update1/conf/smart-update-haproxy.yml +++ b/e2e-tests/smart-update1/conf/smart-update-haproxy.yml @@ -63,7 +63,7 @@ spec: gracePeriod: 30 pmm: enabled: false - image: percona/pmm-client:2.44.1-1 + image: perconalab/pmm-client:1.17.1 serverHost: monitoring-service serverUser: pmm backup: diff --git a/e2e-tests/smart-update1/conf/smart-update-version-service-reachable.yml b/e2e-tests/smart-update1/conf/smart-update-version-service-reachable.yml index fe77639a17..5d36197559 100644 --- a/e2e-tests/smart-update1/conf/smart-update-version-service-reachable.yml +++ b/e2e-tests/smart-update1/conf/smart-update-version-service-reachable.yml @@ -63,7 +63,7 @@ spec: gracePeriod: 30 pmm: enabled: false - image: percona/pmm-client:2.44.1-1 + image: perconalab/pmm-client:1.17.1 serverHost: monitoring-service serverUser: pmm backup: diff --git a/e2e-tests/smart-update1/conf/smart-update-version-service-unreachable.yml b/e2e-tests/smart-update1/conf/smart-update-version-service-unreachable.yml index c33a6f08dc..f26dfca4c8 100644 --- a/e2e-tests/smart-update1/conf/smart-update-version-service-unreachable.yml +++ b/e2e-tests/smart-update1/conf/smart-update-version-service-unreachable.yml @@ -63,7 +63,7 @@ spec: gracePeriod: 30 pmm: enabled: false - image: percona/pmm-client:2.44.1-1 + image: perconalab/pmm-client:1.17.1 serverHost: 
monitoring-service serverUser: pmm backup: diff --git a/e2e-tests/smart-update1/conf/smart-update.yml b/e2e-tests/smart-update1/conf/smart-update.yml index fe8273630d..fa58e6e869 100644 --- a/e2e-tests/smart-update1/conf/smart-update.yml +++ b/e2e-tests/smart-update1/conf/smart-update.yml @@ -50,7 +50,7 @@ spec: antiAffinityTopologyKey: "kubernetes.io/hostname" pmm: enabled: false - image: percona/pmm-client:2.44.1-1 + image: perconalab/pmm-client:1.17.1 serverHost: monitoring-service serverUser: pmm backup: diff --git a/e2e-tests/smart-update2/run b/e2e-tests/smart-update2/run index bc50b8a5b7..5c97322120 100755 --- a/e2e-tests/smart-update2/run +++ b/e2e-tests/smart-update2/run @@ -168,8 +168,6 @@ function check_telemetry_transfer() { desc "telemetry was disabled in operator but not in CR" if [[ "${cr_vs_channel}" == "${image_prefix}-recommended" || "${cr_vs_channel}" == "${image_prefix}-latest" ]] && [ "${telemetry_state}" == 'disabled' ]; then desc "cr VS should have telemetry" - cat "${test_dir}/compare/${telemetry_cr_log_file}" - cat "${tmp_dir}/${telemetry_state}_telemetry.version-service-cr.log.json" diff "${test_dir}/compare/${telemetry_cr_log_file}" <(grep -f "${tmp_dir}/${telemetry_state}_telemetry.version-service-cr.log.json" "${test_dir}/compare/${telemetry_cr_log_file}") desc "operator VS should not have telemetry" [[ -s ${tmp_dir}/disabled_telemetry.version-service.log.json ]] && exit 1 diff --git a/e2e-tests/smart-update3/conf/secrets.yml b/e2e-tests/smart-update3/conf/secrets.yml index 40c399c034..3993626eaf 100644 --- a/e2e-tests/smart-update3/conf/secrets.yml +++ b/e2e-tests/smart-update3/conf/secrets.yml @@ -1,7 +1,7 @@ apiVersion: v1 kind: Secret metadata: - name: my-cluster-secrets + name: smart-update type: Opaque data: root: cm9vdF9wYXNzd29yZA== diff --git a/e2e-tests/smart-update3/conf/smart-update-pmm3.yml b/e2e-tests/smart-update3/conf/smart-update-pmm3.yml index 2c3342e773..6d91497502 100644 --- 
a/e2e-tests/smart-update3/conf/smart-update-pmm3.yml +++ b/e2e-tests/smart-update3/conf/smart-update-pmm3.yml @@ -9,13 +9,13 @@ spec: updateStrategy: SmartUpdate upgradeOptions: versionServiceEndpoint: https://127.0.0.1/versions - apply: recommended + apply: never schedule: "0 4 * * *" - secretsName: my-cluster-secrets + secretsName: smart-update pause: false pxc: size: 3 - image: -pxc + image: percona/percona-xtradb-cluster:8.0.20-11.1 resources: requests: memory: 2Gi diff --git a/e2e-tests/smart-update3/run b/e2e-tests/smart-update3/run index 6be448d0a9..674fac4499 100755 --- a/e2e-tests/smart-update3/run +++ b/e2e-tests/smart-update3/run @@ -10,16 +10,11 @@ test_dir=$(realpath $(dirname $0)) set_debug API='pxc.percona.com/v9-9-9' -TARGET_IMAGE_PXC=${IMAGE_PXC} + CLUSTER="smart-update" CLUSTER_SIZE=3 PROXY_SIZE=2 -if [[ ${TARGET_IMAGE_PXC} == *"percona-xtradb-cluster-operator"* ]]; then - PXC_VER=$(echo -n "${TARGET_IMAGE_PXC}" | $sed -r 's/.*([0-9].[0-9])$/\1/') -else - PXC_VER=$(echo -n "${TARGET_IMAGE_PXC}" | $sed -r 's/.*:([0-9]+\.[0-9]+).*$/\1/') -fi VS_URL="http://version-service" VS_PORT="11000" VS_ENDPOINT=${VS_URL}.${namespace}.svc.cluster.local:${VS_PORT} @@ -33,21 +28,6 @@ function deploy_version_service { sleep 10 } -function add_pxc_version_to_vs { - local pxc_version=${1} - local pxc_image=${2} - - desc "Adding PXC version ${pxc_version} to version service" - kubectl_bin get configmap versions -o json | \ - jq --arg ver "${pxc_version}" --arg img "${pxc_image}" \ - '.data["operator.9.9.9.pxc-operator.json"] |= (fromjson | .versions[0].matrix.pxc += {($ver): {"imagePath": $img, "imageHash": "abc123", "status": "available", "critical": false}} | tojson)' | \ - kubectl_bin apply -f - - - # Restart version service to reload config - kubectl_bin delete pod -l run=version-service - sleep 10 -} - function wait_pmm_update { local cluster=${1} local cluster_size=${2} @@ -120,21 +100,24 @@ function main() { desc 'install PMM3 Server' deploy_pmm3_server - # 
Prepare PMM3 cluster config - cp -f "${test_dir}/conf/${CLUSTER}-pmm3.yml" "${tmp_dir}/${CLUSTER}-pmm3.yml" - yq -i eval ".spec.initContainer.image = \"${IMAGE}\"" "${tmp_dir}/${CLUSTER}-pmm3.yml" + desc 'add PMM3 token to secret' + TOKEN=$(get_pmm_server_token "operator") - spinup_pxc "${CLUSTER}" "${tmp_dir}/${CLUSTER}-pmm3.yml" 3 10 "${test_dir}/conf/secrets.yml" + kubectl_bin apply -f "${conf_dir}/client.yml" - desc 'add PMM3 token to secret' - TOKEN=$(get_pmm_server_token "operator") - kubectl_bin patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmservertoken": "'"$TOKEN"'"}}' + cp -f "${test_dir}/conf/secrets.yml" "${tmp_dir}/secrets.yml" + yq -i eval ".stringData.pmmservertoken = \"${TOKEN}\"" "${tmp_dir}/secrets.yml" + kubectl_bin apply -f "${tmp_dir}/secrets.yml" + + # Prepare PMM3 cluster config + cp -f "${test_dir}/conf/${CLUSTER}-pmm3.yml" "${tmp_dir}/${CLUSTER}-pmm3.yml" + yq "${tmp_dir}/${CLUSTER}-pmm3.yml" \ + | yq eval ".spec.initContainer.image=\"${IMAGE}\"" \ + | yq eval ".spec.backup.image=\"${IMAGE_BACKUP}\"" \ + | yq eval ".spec.haproxy.image=\"${IMAGE_HAPROXY}\"" \ + | kubectl_bin apply -f - - # Get actual PXC version from running cluster and add to version service wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" - ACTUAL_PXC_VERSION=$(kubectl_bin get pxc "${CLUSTER}" -o jsonpath='{.status.pxc.version}') - desc "Detected PXC version for PMM3: ${ACTUAL_PXC_VERSION}" - add_pxc_version_to_vs "${ACTUAL_PXC_VERSION}" "${IMAGE_PXC}" # Get initial PMM3 image initial_pmm3_image=$(kubectl_bin get pod "${CLUSTER}-pxc-0" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}') @@ -170,7 +153,7 @@ function main() { # Verify PMM3 metrics are being collected desc 'verify PMM3 metrics are being collected' sleep 60 - TOKEN=$(getSecretData "my-cluster-secrets" "pmmservertoken") + get_metric_values_pmm3 haproxy_backend_status $namespace-${CLUSTER}-haproxy-0 $TOKEN get_metric_values_pmm3 
haproxy_backend_active_servers $namespace-${CLUSTER}-haproxy-0 $TOKEN desc "PMM3 metrics verified successfully" From 3c58cdc1b4ebaa33716629a61b7cfd0c71a8db25 Mon Sep 17 00:00:00 2001 From: Natalia Marukovich Date: Thu, 18 Dec 2025 11:08:03 +0100 Subject: [PATCH 17/21] fix images --- .../conf/operator.9.9.9.pxc-operator.json | 16 ++++++++-------- .../smart-update3/conf/smart-update-pmm3.yml | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/e2e-tests/smart-update3/conf/operator.9.9.9.pxc-operator.json b/e2e-tests/smart-update3/conf/operator.9.9.9.pxc-operator.json index 0701f8f6af..34643ad594 100644 --- a/e2e-tests/smart-update3/conf/operator.9.9.9.pxc-operator.json +++ b/e2e-tests/smart-update3/conf/operator.9.9.9.pxc-operator.json @@ -17,21 +17,21 @@ "status": "available", "critical": false }, - "8.0.20-11.2": { + "8.0.43-34.1": { "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.0", - "image_hash": "feda5612db18da824e971891d6084465aa9cdc9918c18001cd95ba30916da78b", + "image_hash": "b19a314ae7f6c0f21a52edc4e70b250f91370f462721c0116cca4a7fef0c1acd", "status": "recommended", "critical": false }, - "8.0.20-11.1": { - "image_path": "percona/percona-xtradb-cluster:8.0.20-11.1", - "image_hash": "54b1b2f5153b78b05d651034d4603a13e685cbb9b45bfa09a39864fa3f169349", + "8.0.42-33.1": { + "image_path": "percona/percona-xtradb-cluster:8.0.42-33.1", + "image_hash": "31d8861fb30928625ed002d37a2732aa6c81a8b48630050914ba56b2af05ece9", "status": "available", "critical": false }, - "8.0.19-10.1": { - "image_path": "percona/percona-xtradb-cluster:8.0.19-10.1", - "image_hash": "1058ae8eded735ebdf664807aad7187942fc9a1170b3fd0369574cb61206b63a", + "8.0.41-32.1": { + "image_path": "percona/percona-xtradb-cluster:8.0.41-32.1", + "image_hash": "168ffb252d533b856a74820dea51c155bf5a8cb6a806a4d8a2e387ed7417a733", "status": "available", "critical": false }, diff --git a/e2e-tests/smart-update3/conf/smart-update-pmm3.yml 
b/e2e-tests/smart-update3/conf/smart-update-pmm3.yml index 6d91497502..678195346c 100644 --- a/e2e-tests/smart-update3/conf/smart-update-pmm3.yml +++ b/e2e-tests/smart-update3/conf/smart-update-pmm3.yml @@ -15,7 +15,7 @@ spec: pause: false pxc: size: 3 - image: percona/percona-xtradb-cluster:8.0.20-11.1 + image: percona/percona-xtradb-cluster:8.0.42-33.1 resources: requests: memory: 2Gi From 1f1f2ad3bca420bb5c325abaf7ba4f2bc2c43402 Mon Sep 17 00:00:00 2001 From: Viacheslav Sarzhan Date: Thu, 18 Dec 2025 21:53:54 +0200 Subject: [PATCH 18/21] fix test --- e2e-tests/smart-update3/run | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/e2e-tests/smart-update3/run b/e2e-tests/smart-update3/run index 674fac4499..485ef2ca95 100755 --- a/e2e-tests/smart-update3/run +++ b/e2e-tests/smart-update3/run @@ -159,14 +159,15 @@ function main() { desc "PMM3 metrics verified successfully" - kubectl_bin delete -f "${tmp_dir}/${CLUSTER}-pmm3.yml" - kubectl_bin delete pvc --all - desc 'cleanup' + kubectl_bin delete -f "${tmp_dir}/${CLUSTER}-pmm3.yml" helm uninstall monitoring + kubectl_bin delete -f "${test_dir}/conf/vs.yml" + kubectl_bin delete pvc --all + kubectl_bin delete secrets --all destroy "${namespace}" desc "test passed" } -main \ No newline at end of file +main From ff2ea66fd332d3f26cfd3eab88a74efe1f39a49b Mon Sep 17 00:00:00 2001 From: Natalia Marukovich Date: Fri, 19 Dec 2025 15:40:43 +0100 Subject: [PATCH 19/21] fix test --- .../conf/operator.9.9.9.pxc-operator.dep.json | 32 +++++++++++++-- .../conf/operator.9.9.9.pxc-operator.json | 40 +++++++++++++++---- .../conf/operator.9.9.9.pxc-operator.json | 4 +- 3 files changed, 64 insertions(+), 12 deletions(-) diff --git a/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.dep.json b/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.dep.json index 1a4604c832..62e6992b57 100644 --- a/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.dep.json +++ 
b/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.dep.json @@ -1,11 +1,35 @@ { "backup": { - "8.0.11": { - ">=": [ + "8.4.0": { + "and": [ + { + ">=": [ + { + "var": "productVersion" + }, + "8.4" + ] + } + ] + }, + "8.0.14": { + "and": [ { - "var": "productVersion" + ">=": [ + { + "var": "productVersion" + }, + "8.0" + ] }, - "8.0" + { + "<": [ + { + "var": "productVersion" + }, + "8.4" + ] + } ] }, "2.4.20": { diff --git a/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.json b/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.json index e2aca57b78..3c9f7b8ecb 100644 --- a/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.json +++ b/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.json @@ -5,6 +5,18 @@ "product": "pxc-operator", "matrix": { "pxc": { + "8.4.6-6.1": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.4", + "image_hash": "sha256:bee70295d85ade5044ff1adde70c5047fcc88109a9fbe109befe863372a60a1c", + "status": "available", + "critical": false + }, + "8.4.5-5.1": { + "image_path": "percona/percona-xtradb-cluster:8.4.5-5.1", + "image_hash": "918c54c11c96bf61bb3f32315ef6b344b7b1d68a0457a47a3804eca3932b2b17", + "status": "available", + "critical": false + }, "8.0.43-34.1": { "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.0", "image_hash": "b19a314ae7f6c0f21a52edc4e70b250f91370f462721c0116cca4a7fef0c1acd", @@ -67,15 +79,15 @@ } }, "pmm": { - "2.0.0": { - "image_path": "perconalab/percona-xtradb-cluster-operator:main-pmm", - "image_hash": "28bbb6693689a15c407c85053755334cd25d864e632ef7fed890bc85726cfb68", + "2.44.1-1": { + "image_path": "percona/pmm-client:2.44.1-1", + "image_hash": "52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3", "status": "recommended", "critical": false }, - "1.17.1": { - "image_path": "percona/percona-xtradb-cluster-operator:1.6.0-pmm", - "image_hash": "28bbb6693689a15c407c85053755334cd25d864e632ef7fed890bc85726cfb68", + "3.4.1": { + 
"image_path": "percona/pmm-client:3.4.1", + "image_hash": "1c59d7188f8404e0294f4bfb3d2c3600107f808a023668a170a6b8036c56619b", "status": "recommended", "critical": false } @@ -97,6 +109,12 @@ } }, "backup": { + "8.4.0-4.1": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.4-backup", + "image_hash": "sha256:40a22aa9f83d08c4a79db4a947cdab2e316d7e03535ae8874c6e6ec7bfd11938", + "status": "available", + "critical": false + }, "8.0.14": { "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup", "image_hash": "3d57e1174bac5c1c10b253437205682445c1f72c9b2b462bc8375e211c0265b5", @@ -110,6 +128,14 @@ "critical": false } }, + "log_collector": { + "1.16.1": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-logcollector", + "image_hash": "122a103902d27890dceaf1855f175ea706a126aac940feb1089520029937f4a9", + "status": "recommended", + "critical": false + } + }, "operator": { "9.9.9": { "image_path": "percona/percona-xtradb-cluster-operator:main", @@ -121,4 +147,4 @@ } } ] -} \ No newline at end of file +} diff --git a/e2e-tests/smart-update2/conf/operator.9.9.9.pxc-operator.json b/e2e-tests/smart-update2/conf/operator.9.9.9.pxc-operator.json index 34e9b9b741..3c9f7b8ecb 100644 --- a/e2e-tests/smart-update2/conf/operator.9.9.9.pxc-operator.json +++ b/e2e-tests/smart-update2/conf/operator.9.9.9.pxc-operator.json @@ -144,5 +144,7 @@ "critical": false } } - ] + } + } + ] } From bcc17161bc26df43aaa910e0feffbd1c2c4ff6db Mon Sep 17 00:00:00 2001 From: Natalia Marukovich Date: Sat, 20 Dec 2025 11:57:05 +0100 Subject: [PATCH 20/21] fix pmm version --- e2e-tests/functions | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/e2e-tests/functions b/e2e-tests/functions index 44d8d7af5b..fdd197a96f 100755 --- a/e2e-tests/functions +++ b/e2e-tests/functions @@ -23,8 +23,8 @@ export IMAGE_BACKUP=${IMAGE_BACKUP:-"perconalab/percona-xtradb-cluster-operator: export 
IMAGE_LOGCOLLECTOR=${IMAGE_LOGCOLLECTOR:-"perconalab/fluentbit:main-logcollector"} export IMAGE_PMM_CLIENT=${IMAGE_PMM_CLIENT:-"perconalab/pmm-client:dev-latest"} export IMAGE_PMM_SERVER=${IMAGE_PMM_SERVER:-"perconalab/pmm-server:dev-latest"} -export IMAGE_PMM3_CLIENT=${IMAGE_PMM3_CLIENT:-"perconalab/pmm-client:3.4.0"} -export IMAGE_PMM3_SERVER=${IMAGE_PMM3_SERVER:-"perconalab/pmm-server:3.4.0"} +export IMAGE_PMM3_CLIENT=${IMAGE_PMM3_CLIENT:-"perconalab/pmm-client:3-dev-latest"} +export IMAGE_PMM3_SERVER=${IMAGE_PMM3_SERVER:-"perconalab/pmm-server:3-dev-latest"} if oc get projects 2>/dev/null; then OPENSHIFT=$(oc version -o json | jq -r '.openshiftVersion' | grep -oE '^[0-9]+\.[0-9]+') From 044e8058520cf60cc6dae98d674090088304bde8 Mon Sep 17 00:00:00 2001 From: Natalia Marukovich Date: Tue, 23 Dec 2025 10:53:19 +0100 Subject: [PATCH 21/21] delete unnecessary set -x --- e2e-tests/smart-update3/run | 2 -- 1 file changed, 2 deletions(-) diff --git a/e2e-tests/smart-update3/run b/e2e-tests/smart-update3/run index 485ef2ca95..705da5e2e4 100755 --- a/e2e-tests/smart-update3/run +++ b/e2e-tests/smart-update3/run @@ -65,7 +65,6 @@ function wait_pmm_update { if [[ ${updated_count} -eq ${cluster_size} ]]; then echo " Done! All ${cluster_size} pods updated." - set -x return 0 fi @@ -75,7 +74,6 @@ function wait_pmm_update { done # Timeout reached - set -x echo "ERROR: Timeout waiting for PMM${pmm_version} update after $((max_retry * 5)) seconds" echo "Expected image: ${expected_image}" for i in $(seq 0 $((cluster_size - 1))); do