diff --git a/e2e-tests/run-pr.csv b/e2e-tests/run-pr.csv index e29b02e676..a1749402de 100644 --- a/e2e-tests/run-pr.csv +++ b/e2e-tests/run-pr.csv @@ -1,26 +1,35 @@ auto-tuning,8.0 allocator,8.0 +allocator,8.4 backup-storage-tls,8.0 +backup-storage-tls,8.4 cross-site,8.0 custom-users,8.0 demand-backup-cloud,8.0 +demand-backup-cloud,8.4 demand-backup-cloud-pxb,8.0 demand-backup-encrypted-with-tls,8.0 demand-backup-encrypted-with-tls,8.4 demand-backup-encrypted-with-tls-pxb,8.0 demand-backup-encrypted-with-tls-pxb,8.4 demand-backup,8.0 +demand-backup,8.4 demand-backup-flow-control,8.0 +demand-backup-flow-control,8.4 demand-backup-parallel,8.0 +demand-backup-parallel,8.4 demand-backup-without-passwords,8.0 +demand-backup-without-passwords,8.4 extra-pvc,8.0 haproxy,5.7 haproxy,8.0 +haproxy,8.4 init-deploy,5.7 init-deploy,8.0 limits,8.0 monitoring-2-0,8.0 monitoring-pmm3,8.0 +monitoring-pmm3,8.4 one-pod,5.7 one-pod,8.0 pitr,8.0 @@ -40,18 +49,23 @@ scaling-proxysql,8.0 scaling,8.0 scheduled-backup,5.7 scheduled-backup,8.0 +scheduled-backup,8.4 security-context,8.0 smart-update1,8.0 smart-update2,8.0 +smart-update1,8.4 +smart-update2,8.4 +smart-update3,8.0 storage,8.0 tls-issue-cert-manager-ref,8.0 tls-issue-cert-manager,8.0 tls-issue-self,8.0 upgrade-consistency,8.0 +upgrade-consistency,8.4 upgrade-haproxy,5.7 upgrade-haproxy,8.0 upgrade-proxysql,5.7 upgrade-proxysql,8.0 users,5.7 users,8.0 -validation-hook,8.0 +validation-hook,8.0 \ No newline at end of file diff --git a/e2e-tests/run-release.csv b/e2e-tests/run-release.csv index 0cbb1f74a9..3f4d6563d9 100644 --- a/e2e-tests/run-release.csv +++ b/e2e-tests/run-release.csv @@ -36,6 +36,7 @@ self-healing-advanced-chaos self-healing-chaos smart-update1 smart-update2 +smart-update3 storage tls-issue-cert-manager tls-issue-cert-manager-ref diff --git a/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.dep.json b/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.dep.json index fd2d63de4f..93d471424f 100644 --- a/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.dep.json +++ b/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.dep.json @@ -1,11 +1,35 @@ { "backup": { - "8.0.11": { - ">=": [ + "8.4.0": { + "and": [ + { + ">=": [ + { + "var": "productVersion" + }, + "8.4" + ] + } + ] + }, + "8.0.14": { + "and": [ { - "var": "productVersion" + ">=": [ + { + "var": "productVersion" + }, + "8.0" + ] }, - "8.0" + { + "<": [ + { + "var": "productVersion" + }, + "8.4" + ] + } ] }, "2.4.20": { diff --git a/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.json b/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.json index a9d4ea0c69..34d3b61be4 100644 --- a/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.json +++ b/e2e-tests/smart-update1/conf/operator.9.9.9.pxc-operator.json @@ -5,6 +5,18 @@ "product": "pxc-operator", "matrix": { "pxc": { + "8.4.6-6.1": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.4", + "image_hash": "sha256:bee70295d85ade5044ff1adde70c5047fcc88109a9fbe109befe863372a60a1c", + "status": "available", + "critical": false + }, + "8.4.5-5.1": { + "image_path": "percona/percona-xtradb-cluster:8.4.5-5.1", + "image_hash": "918c54c11c96bf61bb3f32315ef6b344b7b1d68a0457a47a3804eca3932b2b17", + "status": "available", + "critical": false + }, "8.0.20-11.2": { "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.0", "image_hash": "feda5612db18da824e971891d6084465aa9cdc9918c18001cd95ba30916da78b", @@ -67,15 +79,15 @@ } }, "pmm": { - "2.0.0": { - "image_path": 
"perconalab/percona-xtradb-cluster-operator:main-pmm", - "image_hash": "28bbb6693689a15c407c85053755334cd25d864e632ef7fed890bc85726cfb68", + "2.44.1-1": { + "image_path": "percona/pmm-client:2.44.1-1", + "image_hash": "52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3", "status": "recommended", "critical": false }, - "1.17.1": { - "image_path": "percona/percona-xtradb-cluster-operator:1.6.0-pmm", - "image_hash": "28bbb6693689a15c407c85053755334cd25d864e632ef7fed890bc85726cfb68", + "3.4.1": { + "image_path": "percona/pmm-client:3.4.1", + "image_hash": "1c59d7188f8404e0294f4bfb3d2c3600107f808a023668a170a6b8036c56619b", "status": "recommended", "critical": false } @@ -97,6 +109,12 @@ } }, "backup": { + "8.4.0-4.1": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.4-backup", + "image_hash": "sha256:40a22aa9f83d08c4a79db4a947cdab2e316d7e03535ae8874c6e6ec7bfd11938", + "status": "available", + "critical": false + }, "8.0.14": { "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup", "image_hash": "3d57e1174bac5c1c10b253437205682445c1f72c9b2b462bc8375e211c0265b5", diff --git a/e2e-tests/smart-update1/conf/smart-update-haproxy.yml b/e2e-tests/smart-update1/conf/smart-update-haproxy.yml index 5d36197559..fe77639a17 100644 --- a/e2e-tests/smart-update1/conf/smart-update-haproxy.yml +++ b/e2e-tests/smart-update1/conf/smart-update-haproxy.yml @@ -63,7 +63,7 @@ spec: gracePeriod: 30 pmm: enabled: false - image: perconalab/pmm-client:1.17.1 + image: percona/pmm-client:2.44.1-1 serverHost: monitoring-service serverUser: pmm backup: diff --git a/e2e-tests/smart-update1/conf/smart-update-version-service-reachable.yml b/e2e-tests/smart-update1/conf/smart-update-version-service-reachable.yml index 5d36197559..fe77639a17 100644 --- a/e2e-tests/smart-update1/conf/smart-update-version-service-reachable.yml +++ b/e2e-tests/smart-update1/conf/smart-update-version-service-reachable.yml @@ -63,7 +63,7 @@ spec: gracePeriod: 30 pmm: enabled: false - image: perconalab/pmm-client:1.17.1 + image: percona/pmm-client:2.44.1-1 serverHost: monitoring-service serverUser: pmm backup: diff --git a/e2e-tests/smart-update1/conf/smart-update-version-service-unreachable.yml b/e2e-tests/smart-update1/conf/smart-update-version-service-unreachable.yml index f26dfca4c8..c33a6f08dc 100644 --- a/e2e-tests/smart-update1/conf/smart-update-version-service-unreachable.yml +++ b/e2e-tests/smart-update1/conf/smart-update-version-service-unreachable.yml @@ -63,7 +63,7 @@ spec: gracePeriod: 30 pmm: enabled: false - image: perconalab/pmm-client:1.17.1 + image: percona/pmm-client:2.44.1-1 serverHost: monitoring-service serverUser: pmm backup: diff --git a/e2e-tests/smart-update1/conf/smart-update.yml b/e2e-tests/smart-update1/conf/smart-update.yml index fa58e6e869..fe8273630d 100644 --- a/e2e-tests/smart-update1/conf/smart-update.yml +++ b/e2e-tests/smart-update1/conf/smart-update.yml @@ -50,7 +50,7 @@ spec: antiAffinityTopologyKey: "kubernetes.io/hostname" pmm: enabled: false - image: perconalab/pmm-client:1.17.1 + image: percona/pmm-client:2.44.1-1 serverHost: monitoring-service serverUser: pmm backup: diff --git a/e2e-tests/smart-update1/conf/vs.yml b/e2e-tests/smart-update1/conf/vs.yml index f1513b3fac..21210cd158 100644 --- a/e2e-tests/smart-update1/conf/vs.yml +++ b/e2e-tests/smart-update1/conf/vs.yml @@ -18,7 +18,7 @@ spec: - env: - name: SERVE_HTTP value: "true" - image: perconalab/version-service:main-e378a19 + image: perconalab/version-service:main-latest imagePullPolicy: 
IfNotPresent name: version-service ports: diff --git a/e2e-tests/smart-update2/compare/disabled_telemetry.version-service-cr-8.4-cw.log.json b/e2e-tests/smart-update2/compare/disabled_telemetry.version-service-cr-8.4-cw.log.json new file mode 100644 index 0000000000..18fffb1a82 --- /dev/null +++ b/e2e-tests/smart-update2/compare/disabled_telemetry.version-service-cr-8.4-cw.log.json @@ -0,0 +1,21 @@ +{ + "system": "grpc", + "span.kind": "server", + "grpc.service": "version.VersionService", + "grpc.method": "Apply", + "grpc.request.content": { + "msg": { + "product": "pxc-operator", + "operatorVersion": "9.9.9", + "apply": "8.4-latest", + "clusterWideEnabled": true + } + } +} +{ + "system": "grpc", + "span.kind": "server", + "grpc.service": "version.VersionService", + "grpc.method": "Apply", + "grpc.code": "OK" +} diff --git a/e2e-tests/smart-update2/compare/disabled_telemetry.version-service-cr-8.4.log.json b/e2e-tests/smart-update2/compare/disabled_telemetry.version-service-cr-8.4.log.json new file mode 100644 index 0000000000..ebb9af6ed5 --- /dev/null +++ b/e2e-tests/smart-update2/compare/disabled_telemetry.version-service-cr-8.4.log.json @@ -0,0 +1,20 @@ +{ + "system": "grpc", + "span.kind": "server", + "grpc.service": "version.VersionService", + "grpc.method": "Apply", + "grpc.request.content": { + "msg": { + "product": "pxc-operator", + "operatorVersion": "9.9.9", + "apply": "8.4-latest", + } + } +} +{ + "system": "grpc", + "span.kind": "server", + "grpc.service": "version.VersionService", + "grpc.method": "Apply", + "grpc.code": "OK" +} diff --git a/e2e-tests/smart-update2/conf/operator.9.9.9.pxc-operator.dep.json b/e2e-tests/smart-update2/conf/operator.9.9.9.pxc-operator.dep.json index efefe44722..93d471424f 100644 --- a/e2e-tests/smart-update2/conf/operator.9.9.9.pxc-operator.dep.json +++ b/e2e-tests/smart-update2/conf/operator.9.9.9.pxc-operator.dep.json @@ -1,11 +1,35 @@ { "backup": { + "8.4.0": { + "and": [ + { + ">=": [ + { + "var": "productVersion" + }, + "8.4" + ] + } + ] + }, "8.0.14": { - ">=": [ + "and": [ { - "var": "productVersion" + ">=": [ + { + "var": "productVersion" + }, + "8.0" + ] }, - "8.0" + { + "<": [ + { + "var": "productVersion" + }, + "8.4" + ] + } ] }, "2.4.20": { diff --git a/e2e-tests/smart-update2/conf/operator.9.9.9.pxc-operator.json b/e2e-tests/smart-update2/conf/operator.9.9.9.pxc-operator.json index 0ff8ff14d8..f93797cf0b 100644 --- a/e2e-tests/smart-update2/conf/operator.9.9.9.pxc-operator.json +++ b/e2e-tests/smart-update2/conf/operator.9.9.9.pxc-operator.json @@ -5,6 +5,18 @@ "product": "pxc-operator", "matrix": { "pxc": { + "8.4.6-6.1": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.4", + "image_hash": "sha256:bee70295d85ade5044ff1adde70c5047fcc88109a9fbe109befe863372a60a1c", + "status": "available", + "critical": false + }, + "8.4.5-5.1": { + "image_path": "percona/percona-xtradb-cluster:8.4.5-5.1", + "image_hash": "918c54c11c96bf61bb3f32315ef6b344b7b1d68a0457a47a3804eca3932b2b17", + "status": "available", + "critical": false + }, "8.0.20-11.2": { "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.0", "image_hash": "feda5612db18da824e971891d6084465aa9cdc9918c18001cd95ba30916da78b", @@ -67,15 +79,15 @@ } }, "pmm": { - "2.0.0": { - "image_path": "perconalab/percona-xtradb-cluster-operator:main-pmm", - "image_hash": "28bbb6693689a15c407c85053755334cd25d864e632ef7fed890bc85726cfb68", + "2.44.1-1": { + "image_path": "percona/pmm-client:2.44.1-1", + "image_hash": 
"52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3", "status": "recommended", "critical": false }, - "1.17.1": { - "image_path": "percona/percona-xtradb-cluster-operator:1.6.0-pmm", - "image_hash": "28bbb6693689a15c407c85053755334cd25d864e632ef7fed890bc85726cfb68", + "3.4.1": { + "image_path": "percona/pmm-client:3.4.1", + "image_hash": "1c59d7188f8404e0294f4bfb3d2c3600107f808a023668a170a6b8036c56619b", "status": "recommended", "critical": false } @@ -97,6 +109,12 @@ } }, "backup": { + "8.4.0-4.1": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.4-backup", + "image_hash": "sha256:40a22aa9f83d08c4a79db4a947cdab2e316d7e03535ae8874c6e6ec7bfd11938", + "status": "available", + "critical": false + }, "8.0.14": { "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup", "image_hash": "3d57e1174bac5c1c10b253437205682445c1f72c9b2b462bc8375e211c0265b5", diff --git a/e2e-tests/smart-update2/conf/smart-update-haproxy.yml b/e2e-tests/smart-update2/conf/smart-update-haproxy.yml index 5d36197559..fe77639a17 100644 --- a/e2e-tests/smart-update2/conf/smart-update-haproxy.yml +++ b/e2e-tests/smart-update2/conf/smart-update-haproxy.yml @@ -63,7 +63,7 @@ spec: gracePeriod: 30 pmm: enabled: false - image: perconalab/pmm-client:1.17.1 + image: percona/pmm-client:2.44.1-1 serverHost: monitoring-service serverUser: pmm backup: diff --git a/e2e-tests/smart-update2/conf/smart-update-version-service-reachable.yml b/e2e-tests/smart-update2/conf/smart-update-version-service-reachable.yml index 5d36197559..fe77639a17 100644 --- a/e2e-tests/smart-update2/conf/smart-update-version-service-reachable.yml +++ b/e2e-tests/smart-update2/conf/smart-update-version-service-reachable.yml @@ -63,7 +63,7 @@ spec: gracePeriod: 30 pmm: enabled: false - image: perconalab/pmm-client:1.17.1 + image: percona/pmm-client:2.44.1-1 serverHost: monitoring-service serverUser: pmm backup: diff --git a/e2e-tests/smart-update2/conf/smart-update-version-service-unreachable.yml b/e2e-tests/smart-update2/conf/smart-update-version-service-unreachable.yml index f26dfca4c8..e94b8404b9 100644 --- a/e2e-tests/smart-update2/conf/smart-update-version-service-unreachable.yml +++ b/e2e-tests/smart-update2/conf/smart-update-version-service-unreachable.yml @@ -63,7 +63,7 @@ spec: gracePeriod: 30 pmm: enabled: false - image: perconalab/pmm-client:1.17.1 + image: percona/pmm-client:2.44.0 serverHost: monitoring-service serverUser: pmm backup: diff --git a/e2e-tests/smart-update2/conf/smart-update.yml b/e2e-tests/smart-update2/conf/smart-update.yml index fa58e6e869..fe8273630d 100644 --- a/e2e-tests/smart-update2/conf/smart-update.yml +++ b/e2e-tests/smart-update2/conf/smart-update.yml @@ -50,7 +50,7 @@ spec: antiAffinityTopologyKey: "kubernetes.io/hostname" pmm: enabled: false - image: perconalab/pmm-client:1.17.1 + image: percona/pmm-client:2.44.1-1 serverHost: monitoring-service serverUser: pmm backup: diff --git a/e2e-tests/smart-update2/conf/vs.yml b/e2e-tests/smart-update2/conf/vs.yml index f1513b3fac..21210cd158 100644 --- a/e2e-tests/smart-update2/conf/vs.yml +++ b/e2e-tests/smart-update2/conf/vs.yml @@ -18,7 +18,7 @@ spec: - env: - name: SERVE_HTTP value: "true" - image: perconalab/version-service:main-e378a19 + image: perconalab/version-service:main-latest imagePullPolicy: IfNotPresent name: version-service ports: diff --git a/e2e-tests/smart-update2/run b/e2e-tests/smart-update2/run index dc1a84f132..bc50b8a5b7 100755 --- a/e2e-tests/smart-update2/run +++ b/e2e-tests/smart-update2/run @@ -31,6 
+31,13 @@ VS_URL="http://version-service" VS_PORT="11000" VS_ENDPOINT="${VS_URL}:${VS_PORT}" +# Determine update strategy based on PXC version +if [[ "${PXC_VER}" == "8.4" ]]; then + VS_UPDATE_STRATEGY="latest" +else + VS_UPDATE_STRATEGY="recommended" +fi + function get_pod_names_images { local cluster=${1} local type=${2:-pxc} @@ -156,10 +163,13 @@ function check_telemetry_transfer() { fi local image_prefix=${cr_vs_channel%'-recommended'} + image_prefix=${image_prefix%'-latest'} local telemetry_cr_log_file="${telemetry_state}_telemetry.version-service-cr-${image_prefix}${OPERATOR_NS:+-cw}.log.json" desc "telemetry was disabled in operator but not in CR" - if [ "${cr_vs_channel}" == "${image_prefix}-recommended" -a "${telemetry_state}" == 'disabled' ]; then + if [[ "${cr_vs_channel}" == "${image_prefix}-recommended" || "${cr_vs_channel}" == "${image_prefix}-latest" ]] && [ "${telemetry_state}" == 'disabled' ]; then desc "cr VS should have telemetry" + cat "${test_dir}/compare/${telemetry_cr_log_file}" + cat "${tmp_dir}/${telemetry_state}_telemetry.version-service-cr.log.json" diff "${test_dir}/compare/${telemetry_cr_log_file}" <(grep -f "${tmp_dir}/${telemetry_state}_telemetry.version-service-cr.log.json" "${test_dir}/compare/${telemetry_cr_log_file}") desc "operator VS should not have telemetry" [[ -s ${tmp_dir}/disabled_telemetry.version-service.log.json ]] && exit 1 @@ -217,7 +227,7 @@ function main() { sleep 30 wait_pod "$(get_operator_pod)" "480" "${OPERATOR_NS}" - check_telemetry_transfer "http://version-service-cr.${namespace}.svc.cluster.local:11000" "${IMAGE_PREFIX}-recommended" "disabled" + check_telemetry_transfer "http://version-service-cr.${namespace}.svc.cluster.local:11000" "${IMAGE_PREFIX}-${VS_UPDATE_STRATEGY}" "disabled" kubectl_bin delete pod -l run=version-service-cr kubectl_bin delete pod -l run=version-service check_telemetry_transfer "http://version-service-cr.${namespace}.svc.cluster.local:11000" "disabled" "disabled" @@ -237,8 +247,8 @@ function main() { fi ################################################## - desc 'PXC cluster update with recommended image by version service' - vs_image="recommended" + desc "PXC cluster update with ${VS_UPDATE_STRATEGY} image by version service" + vs_image="${VS_UPDATE_STRATEGY}" initial_primary=$(run_mysql 'SELECT @@hostname hostname;' "-h ${CLUSTER}-haproxy -uroot -proot_password") kubectl_bin patch pxc/"${CLUSTER}" --type=merge -p '{"spec":{"upgradeOptions":{"versionServiceEndpoint":"'${VS_ENDPOINT}'","apply":"'${vs_image}'","schedule": "* * * * *"}}}' diff --git a/e2e-tests/smart-update3/compare/select-1.sql b/e2e-tests/smart-update3/compare/select-1.sql new file mode 100644 index 0000000000..8e738f4cf2 --- /dev/null +++ b/e2e-tests/smart-update3/compare/select-1.sql @@ -0,0 +1 @@ +100500 diff --git a/e2e-tests/smart-update3/conf/operator.9.9.9.pxc-operator.dep.json b/e2e-tests/smart-update3/conf/operator.9.9.9.pxc-operator.dep.json new file mode 100644 index 0000000000..93d471424f --- /dev/null +++ b/e2e-tests/smart-update3/conf/operator.9.9.9.pxc-operator.dep.json @@ -0,0 +1,56 @@ +{ + "backup": { + "8.4.0": { + "and": [ + { + ">=": [ + { + "var": "productVersion" + }, + "8.4" + ] + } + ] + }, + "8.0.14": { + "and": [ + { + ">=": [ + { + "var": "productVersion" + }, + "8.0" + ] + }, + { + "<": [ + { + "var": "productVersion" + }, + "8.4" + ] + } + ] + }, + "2.4.20": { + "and": [ + { + ">=": [ + { + "var": "productVersion" + }, + "5.7" + ] + }, + { + "<": [ + { + "var": "productVersion" + }, + "8.0" + ] + } + ] + } + } +} diff 
--git a/e2e-tests/smart-update3/conf/operator.9.9.9.pxc-operator.json b/e2e-tests/smart-update3/conf/operator.9.9.9.pxc-operator.json new file mode 100644 index 0000000000..f93797cf0b --- /dev/null +++ b/e2e-tests/smart-update3/conf/operator.9.9.9.pxc-operator.json @@ -0,0 +1,150 @@ +{ + "versions": [ + { + "operator": "9.9.9", + "product": "pxc-operator", + "matrix": { + "pxc": { + "8.4.6-6.1": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.4", + "image_hash": "sha256:bee70295d85ade5044ff1adde70c5047fcc88109a9fbe109befe863372a60a1c", + "status": "available", + "critical": false + }, + "8.4.5-5.1": { + "image_path": "percona/percona-xtradb-cluster:8.4.5-5.1", + "image_hash": "918c54c11c96bf61bb3f32315ef6b344b7b1d68a0457a47a3804eca3932b2b17", + "status": "available", + "critical": false + }, + "8.0.20-11.2": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.0", + "image_hash": "feda5612db18da824e971891d6084465aa9cdc9918c18001cd95ba30916da78b", + "status": "recommended", + "critical": false + }, + "8.0.20-11.1": { + "image_path": "percona/percona-xtradb-cluster:8.0.20-11.1", + "image_hash": "54b1b2f5153b78b05d651034d4603a13e685cbb9b45bfa09a39864fa3f169349", + "status": "available", + "critical": false + }, + "8.0.19-10.1": { + "image_path": "percona/percona-xtradb-cluster:8.0.19-10.1", + "image_hash": "1058ae8eded735ebdf664807aad7187942fc9a1170b3fd0369574cb61206b63a", + "status": "available", + "critical": false + }, + "5.7.31-31.45.2": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc5.7", + "image_hash": "0decf85c7c7afacc438f5fe355dc8320ea7ffc7018ca2cb6bda3ac0c526ae172", + "status": "recommended", + "critical": false + }, + "5.7.31-31.45": { + "image_path": "percona/percona-xtradb-cluster:5.7.31-31.45", + "image_hash": "3852cef43cc0c6aa791463ba6279e59dcdac3a4fb1a5616c745c1b3c68041dc2", + "status": "available", + "critical": false + }, + "5.7.30-31.43": { + "image_path": "percona/percona-xtradb-cluster:5.7.30-31.43", + "image_hash": "b03a060e9261b37288a2153c78f86dcfc53367c36e1bcdcae046dd2d0b0721af", + "status": "available", + "critical": false + }, + "5.7.29-31.43": { + "image_path": "percona/percona-xtradb-cluster:5.7.29-31.43", + "image_hash": "85fb479de073770280ae601cf3ec22dc5c8cca4c8b0dc893b09503767338e6f9", + "status": "available", + "critical": false + }, + "5.7.28-31.41.2": { + "image_path": "percona/percona-xtradb-cluster:5.7.28-31.41.2", + "image_hash": "fccd6525aaeedb5e436e9534e2a63aebcf743c043526dd05dba8519ebddc8b30", + "status": "available", + "critical": true + }, + "5.7.27-31.39": { + "image_path": "percona/percona-xtradb-cluster:5.7.27-31.39", + "image_hash": "7d8eb4d2031c32c6e96451655f359d8e5e8e047dc95bada9a28c41c158876c26", + "status": "available", + "critical": false + }, + "5.7.26-31.37": { + "image_path": "percona/percona-xtradb-cluster:5.7.26-31.37", + "image_hash": "9d43d8e435e4aca5c694f726cc736667cb938158635c5f01a0e9412905f1327f", + "status": "available", + "critical": false + } + }, + "pmm": { + "2.44.1-1": { + "image_path": "percona/pmm-client:2.44.1-1", + "image_hash": "52a8fb5e8f912eef1ff8a117ea323c401e278908ce29928dafc23fac1db4f1e3", + "status": "recommended", + "critical": false + }, + "3.4.1": { + "image_path": "percona/pmm-client:3.4.1", + "image_hash": "1c59d7188f8404e0294f4bfb3d2c3600107f808a023668a170a6b8036c56619b", + "status": "recommended", + "critical": false + } + }, + "proxysql": { + "2.0.14": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-proxysql", + 
"image_hash": "5c0ee8cb56f3a9cd01b907c2edddc8265b9d84d58a48bae31f8ee460d40ad3d6", + "status": "recommended", + "critical": false + } + }, + "haproxy": { + "2.1.7": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-haproxy", + "image_hash": "59bcc3ae1e3aadb410a89ed266102045437753a82e79501caa74d40c529a9955", + "status": "recommended", + "critical": false + } + }, + "backup": { + "8.4.0-4.1": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.4-backup", + "image_hash": "sha256:40a22aa9f83d08c4a79db4a947cdab2e316d7e03535ae8874c6e6ec7bfd11938", + "status": "available", + "critical": false + }, + "8.0.14": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup", + "image_hash": "3d57e1174bac5c1c10b253437205682445c1f72c9b2b462bc8375e211c0265b5", + "status": "recommended", + "critical": false + }, + "2.4.20": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup", + "image_hash": "6b7e5f284e99553ab6a0c1dc3d8104b3e908d2bac8a71d52d2ea068c3df7d252", + "status": "recommended", + "critical": false + } + }, + "log_collector": { + "1.16.1": { + "image_path": "perconalab/percona-xtradb-cluster-operator:main-logcollector", + "image_hash": "122a103902d27890dceaf1855f175ea706a126aac940feb1089520029937f4a9", + "status": "recommended", + "critical": false + } + }, + "operator": { + "9.9.9": { + "image_path": "percona/percona-xtradb-cluster-operator:main", + "image_hash": "9871d6fb960b4ec498430a398a44eca08873591a6b6efb8a35349e79e24f3072", + "status": "recommended", + "critical": false + } + } + } + } + ] +} diff --git a/e2e-tests/smart-update3/conf/smart-update-pmm3.yml b/e2e-tests/smart-update3/conf/smart-update-pmm3.yml new file mode 100644 index 0000000000..2c3342e773 --- /dev/null +++ b/e2e-tests/smart-update3/conf/smart-update-pmm3.yml @@ -0,0 +1,80 @@ +apiVersion: pxc.percona.com/v1 +kind: PerconaXtraDBCluster +metadata: + name: smart-update + finalizers: + - percona.com/delete-pxc-pods-in-order +spec: + crVersion: 9.9.9 + updateStrategy: SmartUpdate + upgradeOptions: + versionServiceEndpoint: https://127.0.0.1/versions + apply: recommended + schedule: "0 4 * * *" + secretsName: my-cluster-secrets + pause: false + pxc: + size: 3 + image: -pxc + resources: + requests: + memory: 2Gi + cpu: "1" + limits: + memory: 2Gi + cpu: "1" + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 6Gi + affinity: + antiAffinityTopologyKey: "kubernetes.io/hostname" + proxysql: + enabled: false + size: 2 + image: -proxysql + resources: + requests: + memory: 1Gi + cpu: "1" + limits: + memory: 1Gi + cpu: "1" + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 2Gi + affinity: + antiAffinityTopologyKey: "kubernetes.io/hostname" + haproxy: + enabled: true + size: 2 + image: -haproxy + resources: + requests: + memory: 1G + cpu: 600m + affinity: + antiAffinityTopologyKey: "kubernetes.io/hostname" + podDisruptionBudget: + maxUnavailable: 1 + gracePeriod: 30 + pmm: + enabled: true + image: percona/pmm-client:3.4.0 + serverHost: monitoring-service + serverUser: pmm + backup: + image: -backup + serviceAccountName: percona-xtradb-cluster-operator + storages: + pvc: + type: filesystem + volume: + persistentVolumeClaim: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 2Gi \ No newline at end of file diff --git a/e2e-tests/smart-update3/conf/smart-update-version-service-unreachable.yml b/e2e-tests/smart-update3/conf/smart-update-version-service-unreachable.yml new file mode 100644 index 
0000000000..e94b8404b9 --- /dev/null +++ b/e2e-tests/smart-update3/conf/smart-update-version-service-unreachable.yml @@ -0,0 +1,80 @@ +apiVersion: pxc.percona.com/v1 +kind: PerconaXtraDBCluster +metadata: + name: smart-update + finalizers: + - percona.com/delete-pxc-pods-in-order +spec: + crVersion: 9.9.9 + updateStrategy: SmartUpdate + upgradeOptions: + versionServiceEndpoint: https://127.0.0.1/versions + apply: recommended + schedule: "0 4 * * *" + secretsName: my-cluster-secrets + pause: false + pxc: + size: 3 + image: -pxc + resources: + requests: + memory: 2Gi + cpu: "1" + limits: + memory: 2Gi + cpu: "1" + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 6Gi + affinity: + antiAffinityTopologyKey: "kubernetes.io/hostname" + proxysql: + enabled: false + size: 2 + image: -proxysql + resources: + requests: + memory: 1Gi + cpu: "1" + limits: + memory: 1Gi + cpu: "1" + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 2Gi + affinity: + antiAffinityTopologyKey: "kubernetes.io/hostname" + haproxy: + enabled: true + size: 2 + image: -haproxy + resources: + requests: + memory: 1G + cpu: 600m + affinity: + antiAffinityTopologyKey: "kubernetes.io/hostname" + podDisruptionBudget: + maxUnavailable: 1 + gracePeriod: 30 + pmm: + enabled: false + image: percona/pmm-client:2.44.0 + serverHost: monitoring-service + serverUser: pmm + backup: + image: -backup + serviceAccountName: percona-xtradb-cluster-operator + storages: + pvc: + type: filesystem + volume: + persistentVolumeClaim: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 2Gi diff --git a/e2e-tests/smart-update3/conf/vs.yml b/e2e-tests/smart-update3/conf/vs.yml new file mode 100644 index 0000000000..21210cd158 --- /dev/null +++ b/e2e-tests/smart-update3/conf/vs.yml @@ -0,0 +1,50 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + run: version-service + name: version-service +spec: + replicas: 1 + selector: + matchLabels: + run: version-service + template: + metadata: + labels: + run: version-service + spec: + containers: + - env: + - name: SERVE_HTTP + value: "true" + image: perconalab/version-service:main-latest + imagePullPolicy: IfNotPresent + name: version-service + ports: + - containerPort: 11000 + protocol: TCP + volumeMounts: + - name: versions + mountPath: /sources/operator.9.9.9.pxc-operator.dep.json + subPath: operator.9.9.9.pxc-operator.dep.json + - name: versions + mountPath: /sources/operator.9.9.9.pxc-operator.json + subPath: operator.9.9.9.pxc-operator.json + volumes: + - name: versions + configMap: + name: versions +--- +apiVersion: v1 +kind: Service +metadata: + name: version-service +spec: + ports: + - port: 11000 + protocol: TCP + targetPort: 11000 + selector: + run: version-service + type: ClusterIP diff --git a/e2e-tests/smart-update3/run b/e2e-tests/smart-update3/run new file mode 100755 index 0000000000..830b19af76 --- /dev/null +++ b/e2e-tests/smart-update3/run @@ -0,0 +1,211 @@ +#!/bin/bash +# CASES: +# - Update PMM2 to recommended version via version service +# - Update PMM3 to recommended version via version service + +set -o errexit + +test_dir=$(realpath $(dirname $0)) +. 
${test_dir}/../functions + +set_debug + +API='pxc.percona.com/v9-9-9' +TARGET_IMAGE_PXC=${IMAGE_PXC} +CLUSTER="smart-update" +CLUSTER_SIZE=3 +PROXY_SIZE=2 + +if [[ ${TARGET_IMAGE_PXC} == *"percona-xtradb-cluster-operator"* ]]; then + PXC_VER=$(echo -n "${TARGET_IMAGE_PXC}" | $sed -r 's/.*([0-9].[0-9])$/\1/') +else + PXC_VER=$(echo -n "${TARGET_IMAGE_PXC}" | $sed -r 's/.*:([0-9]+\.[0-9]+).*$/\1/') +fi +VS_URL="http://version-service" +VS_PORT="11000" +VS_ENDPOINT="${VS_URL}:${VS_PORT}" + +function deploy_version_service { + desc 'install version service' + kubectl_bin create configmap versions \ + --from-file "${test_dir}/conf/operator.9.9.9.pxc-operator.dep.json" \ + --from-file "${test_dir}/conf/operator.9.9.9.pxc-operator.json" + kubectl_bin apply -f "${test_dir}/conf/vs.yml" + sleep 10 +} + +function add_pxc_version_to_vs { + local pxc_version=${1} + local pxc_image=${2} + + desc "Adding PXC version ${pxc_version} to version service" + kubectl_bin get configmap versions -o json | \ + jq --arg ver "${pxc_version}" --arg img "${pxc_image}" \ + '.data["operator.9.9.9.pxc-operator.json"] |= (fromjson | .versions[0].matrix.pxc += {($ver): {"imagePath": $img, "imageHash": "abc123", "status": "available", "critical": false}} | tojson)' | \ + kubectl_bin apply -f - + + # Restart version service to reload config + kubectl_bin delete pod -l run=version-service + sleep 10 +} + +function wait_pmm_update { + local cluster=${1} + local cluster_size=${2} + local expected_image=${3} + local pmm_version=${4} # e.g., "2" or "3" + local max_retry=${5:-120} # Default 10 minutes + + desc "Waiting for PMM${pmm_version} containers to update to ${expected_image}..." + set +x + local retry=0 + echo -n "Waiting for PMM update" + + until [[ $retry -ge $max_retry ]]; do + local updated_count=0 + for i in $(seq 0 $((cluster_size - 1))); do + local actual_pmm_image=$(kubectl_bin get pod "${cluster}-pxc-${i}" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}' 2>/dev/null || echo "") + # Only check if actual image contains expected image (handles docker.io prefix) + if [[ "${actual_pmm_image}" == *"${expected_image}"* ]]; then + ((updated_count += 1)) + fi + done + + if [[ ${updated_count} -eq ${cluster_size} ]]; then + echo " Done! All ${cluster_size} pods updated." + set -x + return 0 + fi + + echo -n "." 
+ ((retry += 1)) + sleep 5 + done + + # Timeout reached + set -x + echo "ERROR: Timeout waiting for PMM${pmm_version} update after $((max_retry * 5)) seconds" + echo "Expected image: ${expected_image}" + for i in $(seq 0 $((cluster_size - 1))); do + local actual=$(kubectl_bin get pod "${cluster}-pxc-${i}" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}' 2>/dev/null || echo "none") + echo " ${cluster}-pxc-${i}: ${actual}" + done + return 1 +} + +function main() { + create_infra "${namespace}" + deploy_version_service + deploy_cert_manager + + kubectl_bin patch crd perconaxtradbclusters.pxc.percona.com --type='json' -p '[{"op":"add","path":"/spec/versions/-", "value":{"name": "v9-9-9","schema": {"openAPIV3Schema": {"properties": {"spec": {"type": "object","x-kubernetes-preserve-unknown-fields": true},"status": {"type": "object", "x-kubernetes-preserve-unknown-fields": true}}, "type": "object" }}, "served": true, "storage": false, "subresources": { "status": {}}}}]' + kubectl_bin ${OPERATOR_NS:+-n $OPERATOR_NS} set env deploy/percona-xtradb-cluster-operator "PERCONA_VS_FALLBACK_URI=http://version-service.${namespace}.svc.cluster.local:11000" + + ################################################## + desc 'PMM2 cluster update with the recommended image by version service' + + # Prepare cluster config + cp -f "${test_dir}/conf/${CLUSTER}-version-service-unreachable.yml" "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" + yq -i eval ".spec.initContainer.image = \"${IMAGE}\"" "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" + spinup_pxc "${CLUSTER}" "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" + + # Get actual PXC version from running cluster and add to version service + wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" + ACTUAL_PXC_VERSION=$(kubectl_bin get pxc "${CLUSTER}" -o jsonpath='{.status.pxc.version}') + desc "Detected PXC version: ${ACTUAL_PXC_VERSION}" + add_pxc_version_to_vs "${ACTUAL_PXC_VERSION}" "${IMAGE_PXC}" + + # Enable PMM2 with older version + kubectl_bin patch pxc/"${CLUSTER}" --type=merge -p '{"spec":{"pmm":{"enabled":true,"image":"percona/pmm-client:2.44.0","serverHost":"monitoring-service"}}}' + wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" + + # Get initial PMM2 image + initial_pmm2_image=$(kubectl_bin get pod "${CLUSTER}-pxc-0" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}') + desc "Initial PMM2 image: ${initial_pmm2_image}" + + # Get recommended PMM2 image from version service + pmm2_recommended_image=$(kubectl_bin exec -ti "$(get_operator_pod)" ${OPERATOR_NS:+-n $OPERATOR_NS} -- curl -s "${VS_URL}.${namespace}.svc.cluster.local:${VS_PORT}/versions/v1/pxc-operator/9.9.9" | jq -r '.versions[].matrix.pmm | to_entries[] | select(.value.imagePath) | select(.value.imagePath | contains("pmm-client:2")) | select(.value.status == "recommended") | .value.imagePath') + desc "Target PMM2 recommended image: ${pmm2_recommended_image}" + + # Update cluster to use version service with short schedule + kubectl_bin patch pxc/"${CLUSTER}" --type=merge -p '{"spec":{"upgradeOptions":{"versionServiceEndpoint":"'${VS_ENDPOINT}'","apply":"recommended","schedule": "* * * * *"}}}' + + # Wait for PMM2 update + wait_pmm_update "${CLUSTER}" "${CLUSTER_SIZE}" "${pmm2_recommended_image}" "2" || exit 1 + + wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" + + # Verify PMM2 updated + for i in $(seq 0 $((CLUSTER_SIZE - 1))); do + actual_pmm_image=$(kubectl_bin 
get pod "${CLUSTER}-pxc-${i}" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}') + if [[ "${actual_pmm_image}" != *"${pmm2_recommended_image}"* ]]; then + echo "ERROR: PMM2 image not updated on ${CLUSTER}-pxc-${i}. Expected: ${pmm2_recommended_image}, Got: ${actual_pmm_image}" + exit 1 + fi + done + desc "PMM2 successfully updated to ${pmm2_recommended_image}" + + kubectl_bin delete -f "${tmp_dir}/${CLUSTER}-version-service-unreachable.yml" + kubectl_bin delete pvc --all + + ################################################## + desc 'PMM3 cluster update with the recommended image by version service' + + desc "Updating secret for PMM3 (pmmserver -> pmmservertoken)" + # Get current pmmserver value from my-cluster-secrets + pmm_password=$(kubectl_bin get secret my-cluster-secrets -o jsonpath='{.data.pmmserver}') + + # Patch my-cluster-secrets: remove pmmserver, add pmmservertoken + kubectl_bin patch secret my-cluster-secrets --type=json \ + -p '[{"op":"remove","path":"/data/pmmserver"},{"op":"add","path":"/data/pmmservertoken","value":"'${pmm_password}'"}]' + + # Prepare PMM3 cluster config + cp -f "${test_dir}/conf/${CLUSTER}-pmm3.yml" "${tmp_dir}/${CLUSTER}-pmm3.yml" + yq -i eval ".spec.initContainer.image = \"${IMAGE}\"" "${tmp_dir}/${CLUSTER}-pmm3.yml" + yq -i eval ".spec.pxc.image = \"${IMAGE_PXC}\"" "${tmp_dir}/${CLUSTER}-pmm3.yml" + yq -i eval ".spec.haproxy.image = \"${IMAGE_HAPROXY}\"" "${tmp_dir}/${CLUSTER}-pmm3.yml" + yq -i eval ".spec.backup.image = \"${IMAGE_BACKUP}\"" "${tmp_dir}/${CLUSTER}-pmm3.yml" + spinup_pxc "${CLUSTER}" "${tmp_dir}/${CLUSTER}-pmm3.yml" + + # Get actual PXC version from running cluster and add to version service + wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" + ACTUAL_PXC_VERSION=$(kubectl_bin get pxc "${CLUSTER}" -o jsonpath='{.status.pxc.version}') + desc "Detected PXC version for PMM3: ${ACTUAL_PXC_VERSION}" + add_pxc_version_to_vs "${ACTUAL_PXC_VERSION}" "${IMAGE_PXC}" + + # Get initial PMM3 image + initial_pmm3_image=$(kubectl_bin get pod "${CLUSTER}-pxc-0" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}') + desc "Initial PMM3 image: ${initial_pmm3_image}" + + # Get recommended PMM3 image from version service + pmm3_recommended_image=$(kubectl_bin exec -ti "$(get_operator_pod)" ${OPERATOR_NS:+-n $OPERATOR_NS} -- curl -s "${VS_URL}.${namespace}.svc.cluster.local:${VS_PORT}/versions/v1/pxc-operator/9.9.9" | jq -r '.versions[].matrix.pmm | to_entries[] | select(.value.imagePath) | select(.value.imagePath | contains("pmm-client:3")) | select(.value.status == "recommended") | .value.imagePath') + desc "Target PMM3 recommended image: ${pmm3_recommended_image}" + + # Update cluster to use version service + kubectl_bin patch pxc/"${CLUSTER}" --type=merge -p '{"spec":{"upgradeOptions":{"versionServiceEndpoint":"'${VS_ENDPOINT}'","apply":"recommended","schedule": "* * * * *"}}}' + + # Wait for PMM3 update + wait_pmm_update "${CLUSTER}" "${CLUSTER_SIZE}" "${pmm3_recommended_image}" "3" || exit 1 + + wait_cluster_consistency "${CLUSTER}" "${CLUSTER_SIZE}" "${PROXY_SIZE}" + + # Verify PMM3 updated + for i in $(seq 0 $((CLUSTER_SIZE - 1))); do + actual_pmm_image=$(kubectl_bin get pod "${CLUSTER}-pxc-${i}" -o jsonpath='{.status.containerStatuses[?(@.name=="pmm-client")].image}') + if [[ "${actual_pmm_image}" != *"${pmm3_recommended_image}"* ]]; then + echo "ERROR: PMM3 image not updated on ${CLUSTER}-pxc-${i}. 
Expected: ${pmm3_recommended_image}, Got: ${actual_pmm_image}" + exit 1 + fi + done + desc "PMM3 successfully updated to ${pmm3_recommended_image}" + + kubectl_bin delete -f "${tmp_dir}/${CLUSTER}-pmm3.yml" + kubectl_bin delete pvc --all + + desc 'cleanup' + kubectl_bin delete -f "${test_dir}/conf/vs.yml" + destroy "${namespace}" + desc "test passed" +} + +main \ No newline at end of file
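
Note on the backup gating above: the updated operator.9.9.9.pxc-operator.dep.json files express backup-image selection as JSON Logic rules over productVersion (2.4.20 for 5.7 <= productVersion < 8.0, 8.0.14 for 8.0 <= productVersion < 8.4, 8.4.0 for productVersion >= 8.4). Below is a minimal bash sketch of those thresholds for local sanity checks only; it is illustrative, not part of this PR, not used by the e2e tests, and the real evaluation is done by the version service.

#!/bin/bash
# Illustrative only: mirrors the productVersion thresholds encoded in
# operator.9.9.9.pxc-operator.dep.json.

# version_ge A B: true if version A >= version B (version-aware via sort -V)
version_ge() { [ "$(printf '%s\n' "$2" "$1" | sort -V | head -n1)" = "$2" ]; }

backup_version_for() {
    local product_version=$1
    if version_ge "${product_version}" "8.4"; then
        echo "8.4.0"     # ">=" 8.4
    elif version_ge "${product_version}" "8.0"; then
        echo "8.0.14"    # ">=" 8.0 and "<" 8.4
    elif version_ge "${product_version}" "5.7"; then
        echo "2.4.20"    # ">=" 5.7 and "<" 8.0
    else
        return 1         # no matching backup image
    fi
}

backup_version_for "8.4.5"   # -> 8.4.0
backup_version_for "8.0.20"  # -> 8.0.14
backup_version_for "5.7.31"  # -> 2.4.20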