From 5a2e458829ed81acc8e21fa1b3a2a71ca7ad963b Mon Sep 17 00:00:00 2001
From: strtgbb <146047128+strtgbb@users.noreply.github.com>
Date: Tue, 15 Jul 2025 13:35:34 -0400
Subject: [PATCH 1/3] Resolve conflicts with v25.3.6.56-lts
---
.../10_project-antalya-bug-report.md | 36 +
.github/ISSUE_TEMPLATE/10_question.yaml | 20 -
.../ISSUE_TEMPLATE/20_feature-request.yaml | 38 -
.../20_project-antalya-feature-request.md | 20 +
.../30_project-antalya-question.md | 16 +
.../30_unexpected-behaviour.yaml | 50 -
.../35_incomplete_implementation.yaml | 50 -
.../40_altinity-stable-bug-report.md | 50 +
.../ISSUE_TEMPLATE/45_usability-issue.yaml | 50 -
.../50_altinity-stable-question.md | 16 +
.github/ISSUE_TEMPLATE/50_build-issue.yaml | 44 -
.../60_documentation-issue.yaml | 26 -
.../ISSUE_TEMPLATE/70_performance-issue.yaml | 44 -
.../80_backward-compatibility.yaml | 44 -
.github/ISSUE_TEMPLATE/85_bug-report.yaml | 76 -
.github/ISSUE_TEMPLATE/90_fuzzing-report.yaml | 26 -
.../ISSUE_TEMPLATE/95_sanitizer-report.yaml | 26 -
.../96_installation-issues.yaml | 46 -
.github/PULL_REQUEST_TEMPLATE.md | 15 -
.github/actionlint.yml | 10 +-
.github/actions/common_setup/action.yml | 17 +
.../actions/create_workflow_report/action.yml | 40 +
.../ci_run_report.html.jinja | 269 ++
.../create_workflow_report.py | 661 +++
.github/actions/docker_setup/action.yml | 31 +
.github/grype/parse_vulnerabilities_grype.py | 32 +
.github/grype/run_grype_scan.sh | 18 +
.../grype/transform_and_upload_results_s3.sh | 13 +
.github/retry.sh | 22 +
.github/workflows/README.md | 13 +
.github/workflows/backport_branches.yml | 1433 +-----
.github/workflows/cancel.yml | 19 +
.github/workflows/cherry_pick.yml | 2 +-
.github/workflows/compare_fails.yml | 104 +
.github/workflows/create_release.yml | 2 +-
.github/workflows/docker_publish.yml | 120 +
.github/workflows/docker_test_images.yml | 32 +-
.github/workflows/grype_scan.yml | 152 +
.github/workflows/master.yml | 4272 +----------------
.github/workflows/merge_queue.yml | 347 +-
.github/workflows/nightly.yml | 4 +-
.github/workflows/pull_request.yml | 2 +-
.github/workflows/regression.yml | 731 +++
.github/workflows/release_branches.yml | 2173 ++-------
.github/workflows/repo-sanity-checks.yml | 150 +
.github/workflows/reusable_build.yml | 48 +-
.github/workflows/reusable_sign.yml | 166 +
.github/workflows/reusable_simple_job.yml | 2 +-
.github/workflows/reusable_test.yml | 104 +-
.github/workflows/scheduled_runs.yml | 55 +
.github/workflows/sign_and_release.yml | 426 ++
cmake/autogenerated_versions.txt | 8 +-
cmake/version.cmake | 9 +-
contrib/google-protobuf-cmake/CMakeLists.txt | 1 +
contrib/grpc-cmake/CMakeLists.txt | 2 +
contrib/openssl-cmake/CMakeLists.txt | 3 +
contrib/sparse-checkout/update-aws.sh | 1 +
docker/docs/builder/Dockerfile | 2 +-
docker/images.json | 85 +-
docker/keeper/Dockerfile | 2 +-
docker/packager/binary-builder/Dockerfile | 8 +-
docker/packager/binary-builder/build.sh | 3 +-
docker/packager/cctools/Dockerfile | 8 +-
docker/packager/packager | 33 +-
docker/server/README.md | 2 +-
docker/server/README.sh | 4 +-
docker/server/README.src/github-repo | 2 +-
docker/server/README.src/license.md | 2 +-
docker/server/README.src/logo.svg | 56 +-
docker/server/README.src/maintainer.md | 2 +-
docker/test/README.md | 2 +-
docker/test/base/Dockerfile | 4 +-
docker/test/clickbench/Dockerfile | 2 +-
docker/test/compatibility/centos/Dockerfile | 2 +-
docker/test/compatibility/ubuntu/Dockerfile | 2 +-
docker/test/fasttest/Dockerfile | 5 +-
docker/test/fuzzer/Dockerfile | 6 +-
docker/test/integration/base/Dockerfile | 6 +-
.../clickhouse_with_unity_catalog/Dockerfile | 4 +-
.../integration/helper_container/Dockerfile | 2 +-
.../test/integration/kerberos_kdc/Dockerfile | 2 +-
.../mysql_golang_client/Dockerfile | 2 +-
.../integration/mysql_java_client/Dockerfile | 2 +-
.../integration/mysql_js_client/Dockerfile | 2 +-
.../integration/mysql_php_client/Dockerfile | 2 +-
.../postgresql_java_client/Dockerfile | 2 +-
docker/test/integration/resolver/Dockerfile | 2 +-
.../integration/resolver/requirements.txt | 12 +-
docker/test/integration/runner/Dockerfile | 4 +-
.../integration/runner/dockerd-entrypoint.sh | 6 +-
docker/test/integration/s3_proxy/Dockerfile | 2 +-
docker/test/keeper-jepsen/Dockerfile | 4 +-
docker/test/libfuzzer/Dockerfile | 6 +-
docker/test/performance-comparison/Dockerfile | 8 +-
docker/test/server-jepsen/Dockerfile | 4 +-
docker/test/sqlancer/Dockerfile | 2 +-
docker/test/sqllogic/Dockerfile | 4 +-
docker/test/sqltest/Dockerfile | 4 +-
docker/test/stateful/Dockerfile | 4 +-
docker/test/stateless/Dockerfile | 4 +-
docker/test/stress/Dockerfile | 4 +-
docker/test/stress/README.md | 2 +-
docker/test/unit/Dockerfile | 4 +-
docker/test/upgrade/Dockerfile | 29 +
docker/test/util/Dockerfile | 4 +-
src/Common/SignalHandlers.cpp | 4 +-
tests/broken_tests.json | 281 ++
tests/ci/build_check.py | 66 +-
tests/ci/build_report_check.py | 6 +-
tests/ci/changelog.py | 56 +-
tests/ci/ci.py | 51 +-
tests/ci/ci_buddy.py | 4 +-
tests/ci/ci_cache.py | 3 +-
tests/ci/ci_config.py | 39 +-
tests/ci/ci_definitions.py | 48 +-
tests/ci/ci_fuzzer_check.py | 2 +-
tests/ci/clickbench.py | 2 +-
tests/ci/clickhouse_helper.py | 18 +-
tests/ci/commit_status_helper.py | 9 +-
tests/ci/compatibility_check.py | 4 +-
tests/ci/create_release.py | 2 +-
tests/ci/docker_images_check.py | 3 +-
tests/ci/docker_images_helper.py | 8 +-
tests/ci/docker_manifests_merge.py | 2 +-
tests/ci/docker_server.py | 13 +-
tests/ci/docs_check.py | 4 +-
tests/ci/env_helper.py | 17 +-
tests/ci/fast_test_check.py | 2 +-
tests/ci/functional_test_check.py | 32 +-
tests/ci/get_robot_token.py | 14 +-
tests/ci/git_helper.py | 44 +-
tests/ci/install_check.py | 9 +-
tests/ci/integration_test_images.py | 25 +-
tests/ci/integration_tests_runner.py | 102 +-
tests/ci/jepsen_check.py | 4 +-
tests/ci/libfuzzer_test_check.py | 2 +-
tests/ci/performance_comparison_check.py | 6 +-
tests/ci/pr_info.py | 25 +-
.../packaging/ansible/inventory/localhost.yml | 73 +
.../roles/get_cloudfront_info/tasks/main.yml | 34 +
.../ansible/roles/publish_pkgs/tasks/main.yml | 98 +
.../roles/update_bin_repo/tasks/main.yml | 52 +
.../roles/update_deb_repo/tasks/main.yml | 61 +
.../templates/apt-ftparchive-stable.conf | 6 +
.../templates/apt-ftparchive.conf | 17 +
.../roles/update_rpm_repo/tasks/main.yml | 51 +
.../roles/update_rpm_repo/templates/repo.j2 | 7 +
.../update_rpm_repo/templates/rpmmacros.j2 | 1 +
.../roles/update_tar_repo/tasks/main.yml | 61 +
.../packaging/ansible/sign-and-release.yml | 8 +
.../release/packaging/dirindex/dirindexgen.py | 122 +
.../packaging/static/bootstrap.bundle.min.js | 7 +
tests/ci/report.py | 5 +-
tests/ci/s3_helper.py | 41 +
tests/ci/sign_release.py | 97 +
tests/ci/sqlancer_check.py | 2 +-
tests/ci/sqllogic_test.py | 2 +-
tests/ci/sqltest.py | 2 +-
tests/ci/stress_check.py | 36 +-
tests/ci/test_ci_config.py | 1 -
tests/ci/test_ci_options.py | 1 -
tests/ci/test_git.py | 10 +-
tests/ci/test_version.py | 80 +-
tests/ci/tests/autogenerated_versions.txt | 1 +
tests/ci/tests/docker_images_for_tests.json | 76 +-
tests/ci/unit_tests_check.py | 2 +-
tests/ci/version_helper.py | 134 +-
tests/config/config.d/azure_storage_conf.xml | 6 +-
.../process_functional_tests_result.py | 23 +-
tests/docker_scripts/stress_runner.sh | 4 +
.../compose/docker_compose_clickhouse.yml | 2 +-
.../compose/docker_compose_dotnet_client.yml | 2 +-
.../compose/docker_compose_jdbc_bridge.yml | 1 +
.../compose/docker_compose_keeper.yml | 6 +-
.../docker_compose_kerberized_kafka.yml | 2 +-
.../compose/docker_compose_kerberos_kdc.yml | 2 +-
.../compose/docker_compose_minio.yml | 6 +-
.../docker_compose_mysql_golang_client.yml | 2 +-
.../docker_compose_mysql_java_client.yml | 2 +-
.../docker_compose_mysql_js_client.yml | 2 +-
.../docker_compose_mysql_php_client.yml | 2 +-
.../compose/docker_compose_nginx.yml | 2 +-
.../docker_compose_postgresql_java_client.yml | 2 +-
tests/integration/helpers/cluster.py | 11 +-
tests/integration/helpers/network.py | 2 +-
tests/integration/runner | 2 +-
.../test_attach_partition_using_copy/test.py | 4 +-
.../test_backward_compatibility/test.py | 2 +-
.../test_aggregate_fixed_key.py | 2 +-
.../test_aggregate_function_state.py | 4 +-
.../test_convert_ordinary.py | 2 +-
.../test_cte_distributed.py | 2 +-
.../test_functions.py | 2 +-
.../test_insert_profile_events.py | 2 +-
.../test_ip_types_binary_compatibility.py | 2 +-
.../test_memory_bound_aggregation.py | 4 +-
.../test_normalized_count_comparison.py | 2 +-
.../test_select_aggregate_alias_column.py | 2 +-
.../test_short_strings_aggregation.py | 4 +-
...test_vertical_merges_from_compact_parts.py | 2 +-
tests/integration/test_cow_policy/test.py | 4 +-
.../test_disk_over_web_server/test.py | 2 +-
.../test.py | 2 +-
tests/integration/test_old_versions/test.py | 2 +-
.../test_polymorphic_parts/test.py | 2 +-
.../test.py | 4 +-
.../test_replicating_constants/test.py | 4 +-
.../configs/remote_servers.xml | 1 +
.../test_system_ddl_worker_queue/test.py | 128 +-
.../test_trace_log_build_id/test.py | 2 +-
tests/integration/test_ttl_replicated/test.py | 6 +-
tests/integration/test_version_update/test.py | 2 +-
.../test.py | 6 +-
213 files changed, 6406 insertions(+), 8423 deletions(-)
create mode 100644 .github/ISSUE_TEMPLATE/10_project-antalya-bug-report.md
delete mode 100644 .github/ISSUE_TEMPLATE/10_question.yaml
delete mode 100644 .github/ISSUE_TEMPLATE/20_feature-request.yaml
create mode 100644 .github/ISSUE_TEMPLATE/20_project-antalya-feature-request.md
create mode 100644 .github/ISSUE_TEMPLATE/30_project-antalya-question.md
delete mode 100644 .github/ISSUE_TEMPLATE/30_unexpected-behaviour.yaml
delete mode 100644 .github/ISSUE_TEMPLATE/35_incomplete_implementation.yaml
create mode 100644 .github/ISSUE_TEMPLATE/40_altinity-stable-bug-report.md
delete mode 100644 .github/ISSUE_TEMPLATE/45_usability-issue.yaml
create mode 100644 .github/ISSUE_TEMPLATE/50_altinity-stable-question.md
delete mode 100644 .github/ISSUE_TEMPLATE/50_build-issue.yaml
delete mode 100644 .github/ISSUE_TEMPLATE/60_documentation-issue.yaml
delete mode 100644 .github/ISSUE_TEMPLATE/70_performance-issue.yaml
delete mode 100644 .github/ISSUE_TEMPLATE/80_backward-compatibility.yaml
delete mode 100644 .github/ISSUE_TEMPLATE/85_bug-report.yaml
delete mode 100644 .github/ISSUE_TEMPLATE/90_fuzzing-report.yaml
delete mode 100644 .github/ISSUE_TEMPLATE/95_sanitizer-report.yaml
delete mode 100644 .github/ISSUE_TEMPLATE/96_installation-issues.yaml
create mode 100644 .github/actions/create_workflow_report/action.yml
create mode 100644 .github/actions/create_workflow_report/ci_run_report.html.jinja
create mode 100755 .github/actions/create_workflow_report/create_workflow_report.py
create mode 100644 .github/actions/docker_setup/action.yml
create mode 100644 .github/grype/parse_vulnerabilities_grype.py
create mode 100755 .github/grype/run_grype_scan.sh
create mode 100755 .github/grype/transform_and_upload_results_s3.sh
create mode 100755 .github/retry.sh
create mode 100644 .github/workflows/README.md
create mode 100644 .github/workflows/cancel.yml
create mode 100644 .github/workflows/compare_fails.yml
create mode 100644 .github/workflows/docker_publish.yml
create mode 100644 .github/workflows/grype_scan.yml
create mode 100644 .github/workflows/regression.yml
create mode 100644 .github/workflows/repo-sanity-checks.yml
create mode 100644 .github/workflows/reusable_sign.yml
create mode 100644 .github/workflows/scheduled_runs.yml
create mode 100644 .github/workflows/sign_and_release.yml
create mode 100644 docker/test/upgrade/Dockerfile
create mode 100644 tests/broken_tests.json
mode change 100644 => 100755 tests/ci/ci.py
create mode 100644 tests/ci/release/packaging/ansible/inventory/localhost.yml
create mode 100644 tests/ci/release/packaging/ansible/roles/get_cloudfront_info/tasks/main.yml
create mode 100644 tests/ci/release/packaging/ansible/roles/publish_pkgs/tasks/main.yml
create mode 100644 tests/ci/release/packaging/ansible/roles/update_bin_repo/tasks/main.yml
create mode 100644 tests/ci/release/packaging/ansible/roles/update_deb_repo/tasks/main.yml
create mode 100644 tests/ci/release/packaging/ansible/roles/update_deb_repo/templates/apt-ftparchive-stable.conf
create mode 100644 tests/ci/release/packaging/ansible/roles/update_deb_repo/templates/apt-ftparchive.conf
create mode 100644 tests/ci/release/packaging/ansible/roles/update_rpm_repo/tasks/main.yml
create mode 100644 tests/ci/release/packaging/ansible/roles/update_rpm_repo/templates/repo.j2
create mode 100644 tests/ci/release/packaging/ansible/roles/update_rpm_repo/templates/rpmmacros.j2
create mode 100644 tests/ci/release/packaging/ansible/roles/update_tar_repo/tasks/main.yml
create mode 100644 tests/ci/release/packaging/ansible/sign-and-release.yml
create mode 100755 tests/ci/release/packaging/dirindex/dirindexgen.py
create mode 100644 tests/ci/release/packaging/static/bootstrap.bundle.min.js
create mode 100644 tests/ci/sign_release.py
diff --git a/.github/ISSUE_TEMPLATE/10_project-antalya-bug-report.md b/.github/ISSUE_TEMPLATE/10_project-antalya-bug-report.md
new file mode 100644
index 000000000000..0c8c15a05eaf
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/10_project-antalya-bug-report.md
@@ -0,0 +1,36 @@
+---
+name: Project Antalya Bug Report
+about: Help us improve Project Antalya
+title: ''
+labels: antalya
+assignees: ''
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+
+**To Reproduce**
+Steps to reproduce the behavior:
+1. Go to '...'
+2. Click on '....'
+3. Scroll down to '....'
+4. See error
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Key information**
+Provide relevant runtime details.
+ - Project Antalya Build Version
+ - Cloud provider, e.g., AWS
+ - Kubernetes provider, e.g., GKE or Minikube
+ - Object storage, e.g., AWS S3 or Minio
+ - Iceberg catalog, e.g., Glue with REST Proxy
+
+**Additional context**
+Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/10_question.yaml b/.github/ISSUE_TEMPLATE/10_question.yaml
deleted file mode 100644
index 71a3d3da6425..000000000000
--- a/.github/ISSUE_TEMPLATE/10_question.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-name: Question
-description: Ask a question about ClickHouse
-labels: ["question"]
-body:
- - type: markdown
- attributes:
- value: |
- > Make sure to check documentation https://clickhouse.com/docs/ first. If the question is concise and probably has a short answer, asking it in [community Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-1gh9ds7f4-PgDhJAaF8ad5RbWBAAjzFg) is probably the fastest way to find the answer. For more complicated questions, consider asking them on StackOverflow with "clickhouse" tag https://stackoverflow.com/questions/tagged/clickhouse
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Question
- description: Please put your question here.
- validations:
- required: true
diff --git a/.github/ISSUE_TEMPLATE/20_feature-request.yaml b/.github/ISSUE_TEMPLATE/20_feature-request.yaml
deleted file mode 100644
index 054efc2d61ee..000000000000
--- a/.github/ISSUE_TEMPLATE/20_feature-request.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-name: Feature request
-description: Suggest an idea for ClickHouse
-labels: ["feature"]
-body:
- - type: markdown
- attributes:
- value: |
- > (you don't have to strictly follow this form)
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Use case
- description: A clear and concise description of what the intended usage scenario is.
- validations:
- required: true
- - type: textarea
- attributes:
- label: Describe the solution you'd like
- description: A clear and concise description of what you want to happen.
- validations:
- required: true
- - type: textarea
- attributes:
- label: Describe alternatives you've considered
- description: A clear and concise description of any alternative solutions or features you've considered.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Additional context
- description: Add any other context or screenshots about the feature request here.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/20_project-antalya-feature-request.md b/.github/ISSUE_TEMPLATE/20_project-antalya-feature-request.md
new file mode 100644
index 000000000000..603584bf4428
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/20_project-antalya-feature-request.md
@@ -0,0 +1,20 @@
+---
+name: Project Antalya Feature request
+about: Suggest an idea for Project Antalya
+title: ''
+labels: antalya, enhancement
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/.github/ISSUE_TEMPLATE/30_project-antalya-question.md b/.github/ISSUE_TEMPLATE/30_project-antalya-question.md
new file mode 100644
index 000000000000..c77cee4a916b
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/30_project-antalya-question.md
@@ -0,0 +1,16 @@
+---
+name: Project Antalya Question
+about: Ask a question about Project Antalya
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+Make sure to check the [Altinity documentation](https://docs.altinity.com/) and the [Altinity Knowledge Base](https://kb.altinity.com/) first.
+
+If your question is concise and likely has a short answer, asking it in [the Altinity Slack workspace](https://altinity.com/slack) is probably the fastest way to find the answer. Use the #antalya channel.
+
+If you'd rather file a GitHub issue, remove all this text and ask your question here.
+
+Please include relevant environment information as applicable.
diff --git a/.github/ISSUE_TEMPLATE/30_unexpected-behaviour.yaml b/.github/ISSUE_TEMPLATE/30_unexpected-behaviour.yaml
deleted file mode 100644
index 3cb55a960d18..000000000000
--- a/.github/ISSUE_TEMPLATE/30_unexpected-behaviour.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-name: Unexpected behaviour
-description: Some feature is working in non-obvious way
-labels: ["unexpected behaviour"]
-body:
- - type: markdown
- attributes:
- value: |
- > (you don't have to strictly follow this form)
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Describe the unexpected behaviour
- description: A clear and concise description of what works not as it is supposed to.
- validations:
- required: true
- - type: textarea
- attributes:
- label: How to reproduce
- description: |
- * Which ClickHouse server version to use
- * Which interface to use, if matters
- * Non-default settings, if any
- * `CREATE TABLE` statements for all tables involved
- * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary
- * Queries to run that lead to unexpected result
- validations:
- required: true
- - type: textarea
- attributes:
- label: Expected behavior
- description: A clear and concise description of what you expected to happen.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Error message and/or stacktrace
- description: If applicable, add screenshots to help explain your problem.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Additional context
- description: Add any other context about the problem here.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/35_incomplete_implementation.yaml b/.github/ISSUE_TEMPLATE/35_incomplete_implementation.yaml
deleted file mode 100644
index 68ab7129a873..000000000000
--- a/.github/ISSUE_TEMPLATE/35_incomplete_implementation.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-name: Incomplete implementation
-description: Implementation of existing feature is not finished
-labels: ["unfinished code"]
-body:
- - type: markdown
- attributes:
- value: |
- > (you don't have to strictly follow this form)
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Describe the unexpected behaviour
- description: A clear and concise description of what works not as it is supposed to.
- validations:
- required: true
- - type: textarea
- attributes:
- label: How to reproduce
- description: |
- * Which ClickHouse server version to use
- * Which interface to use, if matters
- * Non-default settings, if any
- * `CREATE TABLE` statements for all tables involved
- * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary
- * Queries to run that lead to unexpected result
- validations:
- required: true
- - type: textarea
- attributes:
- label: Expected behavior
- description: A clear and concise description of what you expected to happen.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Error message and/or stacktrace
- description: If applicable, add screenshots to help explain your problem.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Additional context
- description: Add any other context about the problem here.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/40_altinity-stable-bug-report.md b/.github/ISSUE_TEMPLATE/40_altinity-stable-bug-report.md
new file mode 100644
index 000000000000..90bf241dc195
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/40_altinity-stable-bug-report.md
@@ -0,0 +1,50 @@
+---
+name: Altinity Stable Bug report
+about: Report something broken in an Altinity Stable Build
+title: ''
+labels: stable
+assignees: ''
+
+---
+
+✅ *I checked [the Altinity Stable Builds lifecycle table](https://docs.altinity.com/altinitystablebuilds/#altinity-stable-builds-life-cycle-table), and the Altinity Stable Build version I'm using is still supported.*
+
+## Type of problem
+Choose one of the following items, then delete the others:
+
+**Bug report** - something's broken
+
+**Incomplete implementation** - something's not quite right
+
+**Performance issue** - something works, just not as quickly as it should
+
+**Backwards compatibility issue** - something used to work, but now it doesn't
+
+**Unexpected behavior** - something surprising happened, but it wasn't the good kind of surprise
+
+**Installation issue** - something doesn't install the way it should
+
+**Usability issue** - something works, but it could be a lot easier
+
+**Documentation issue** - something in the docs is wrong, incomplete, or confusing
+
+## Describe the situation
+A clear, concise description of what's happening. Can you reproduce it in a ClickHouse Official build of the same version?
+
+## How to reproduce the behavior
+
+* Which Altinity Stable Build version to use
+* Which interface to use, if it matters
+* Non-default settings, if any
+* `CREATE TABLE` statements for all tables involved
+* Sample data for all these tables, use the [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/31fd4f5eb41d5ec26724fc645c11fe4d62eae07f/programs/obfuscator/README.md) if necessary
+* Queries to run that lead to an unexpected result
+
+## Expected behavior
+A clear, concise description of what you expected to happen.
+
+## Logs, error messages, stacktraces, screenshots...
+Add any details that might explain the issue.
+
+## Additional context
+Add any other context about the issue here.
diff --git a/.github/ISSUE_TEMPLATE/45_usability-issue.yaml b/.github/ISSUE_TEMPLATE/45_usability-issue.yaml
deleted file mode 100644
index 96543a7af6c9..000000000000
--- a/.github/ISSUE_TEMPLATE/45_usability-issue.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-name: Usability issue
-description: Report something can be made more convenient to use
-labels: ["usability"]
-body:
- - type: markdown
- attributes:
- value: |
- > (you don't have to strictly follow this form)
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Describe the unexpected behaviour
- description: A clear and concise description of what works not as it is supposed to.
- validations:
- required: true
- - type: textarea
- attributes:
- label: How to reproduce
- description: |
- * Which ClickHouse server version to use
- * Which interface to use, if matters
- * Non-default settings, if any
- * `CREATE TABLE` statements for all tables involved
- * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary
- * Queries to run that lead to unexpected result
- validations:
- required: true
- - type: textarea
- attributes:
- label: Expected behavior
- description: A clear and concise description of what you expected to happen.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Error message and/or stacktrace
- description: If applicable, add screenshots to help explain your problem.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Additional context
- description: Add any other context about the problem here.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/50_altinity-stable-question.md b/.github/ISSUE_TEMPLATE/50_altinity-stable-question.md
new file mode 100644
index 000000000000..027970e25a02
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/50_altinity-stable-question.md
@@ -0,0 +1,16 @@
+---
+name: Altinity Stable Question
+about: Ask a question about an Altinity Stable Build
+title: ''
+labels: question, stable
+assignees: ''
+
+---
+
+Make sure to check the [Altinity documentation](https://docs.altinity.com/) and the [Altinity Knowledge Base](https://kb.altinity.com/) first.
+
+If your question is concise and likely has a short answer, asking it in [the Altinity Slack channel](https://altinity.com/slack) is probably the fastest way to find the answer.
+
+For more complicated questions, consider [asking them on StackOverflow with the tag "clickhouse"](https://stackoverflow.com/questions/tagged/clickhouse).
+
+If you'd rather file a GitHub issue, remove all this text and ask your question here.
diff --git a/.github/ISSUE_TEMPLATE/50_build-issue.yaml b/.github/ISSUE_TEMPLATE/50_build-issue.yaml
deleted file mode 100644
index a96f538bcc89..000000000000
--- a/.github/ISSUE_TEMPLATE/50_build-issue.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-name: Build issue
-description: Report failed ClickHouse build from master
-labels: ["build"]
-body:
- - type: markdown
- attributes:
- value: |
- > Make sure that `git diff` result is empty and you've just pulled fresh master. Try cleaning up cmake cache. Just in case, official build instructions are published here: https://clickhouse.com/docs/en/development/build/
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Operating system
- description: OS kind or distribution, specific version/release, non-standard kernel if any. If you are trying to build inside virtual machine, please mention it too.
- validations:
- required: true
- - type: textarea
- attributes:
- label: CMake version
- description: The output of `cmake --version`.
- validations:
- required: true
- - type: textarea
- attributes:
- label: Ninja version
- description: The output of `ninja --version`.
- validations:
- required: true
- - type: textarea
- attributes:
- label: Compiler name and version
- description: We recommend to use clang. The version can be obtained via `clang --version`.
- validations:
- required: true
- - type: textarea
- attributes:
- label: Full cmake and/or ninja output
- description: Please include everything!
- validations:
- required: true
diff --git a/.github/ISSUE_TEMPLATE/60_documentation-issue.yaml b/.github/ISSUE_TEMPLATE/60_documentation-issue.yaml
deleted file mode 100644
index bba6df87a783..000000000000
--- a/.github/ISSUE_TEMPLATE/60_documentation-issue.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-name: Documentation issue
-description: Report something incorrect or missing in documentation
-labels: ["comp-documentation"]
-body:
- - type: markdown
- attributes:
- value: |
- > Make sure that `git diff` result is empty and you've just pulled fresh master. Try cleaning up cmake cache. Just in case, official build instructions are published here: https://clickhouse.com/docs/en/development/build/
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Describe the issue
- description: A clear and concise description of what's wrong in documentation.
- validations:
- required: true
- - type: textarea
- attributes:
- label: Additional context
- description: Add any other context about the problem here.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/70_performance-issue.yaml b/.github/ISSUE_TEMPLATE/70_performance-issue.yaml
deleted file mode 100644
index 281d51c73b51..000000000000
--- a/.github/ISSUE_TEMPLATE/70_performance-issue.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-name: Performance issue
-description: Report something working slower than expected
-labels: ["performance"]
-body:
- - type: markdown
- attributes:
- value: |
- > (you don't have to strictly follow this form)
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Describe the situation
- description: What exactly works slower than expected?
- validations:
- required: true
- - type: textarea
- attributes:
- label: How to reproduce
- description: |
- * Which ClickHouse server version to use
- * Which interface to use, if matters
- * Non-default settings, if any
- * `CREATE TABLE` statements for all tables involved
- * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary
- * Queries to run that lead to unexpected result
- validations:
- required: true
- - type: textarea
- attributes:
- label: Expected performance
- description: What are your performance expectation, why do you think they are realistic? Has it been working faster in older ClickHouse releases? Is it working faster in some specific other system?
- validations:
- required: false
- - type: textarea
- attributes:
- label: Additional context
- description: Add any other context about the problem here.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/80_backward-compatibility.yaml b/.github/ISSUE_TEMPLATE/80_backward-compatibility.yaml
deleted file mode 100644
index 32786cd87dca..000000000000
--- a/.github/ISSUE_TEMPLATE/80_backward-compatibility.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-name: Backward compatibility issue
-description: Report the case when the behaviour of a new version can break existing use cases
-labels: ["backward compatibility"]
-body:
- - type: markdown
- attributes:
- value: |
- > (you don't have to strictly follow this form)
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Describe the unexpected behaviour
- description: A clear and concise description of what works not as it is supposed to.
- validations:
- required: true
- - type: textarea
- attributes:
- label: How to reproduce
- description: |
- * Which ClickHouse server version to use
- * Which interface to use, if matters
- * Non-default settings, if any
- * `CREATE TABLE` statements for all tables involved
- * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary
- * Queries to run that lead to unexpected result
- validations:
- required: true
- - type: textarea
- attributes:
- label: Error message and/or stacktrace
- description: If applicable, add screenshots to help explain your problem.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Additional context
- description: Add any other context about the problem here.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/85_bug-report.yaml b/.github/ISSUE_TEMPLATE/85_bug-report.yaml
deleted file mode 100644
index 5344fbcda255..000000000000
--- a/.github/ISSUE_TEMPLATE/85_bug-report.yaml
+++ /dev/null
@@ -1,76 +0,0 @@
-name: Bug report
-description: Wrong behavior (visible to users) in the official ClickHouse release.
-labels: ["potential bug"]
-body:
- - type: markdown
- attributes:
- value: |
- > Please make sure that the version you're using is still supported (you can find the list [here](https://github.com/ClickHouse/ClickHouse/blob/master/SECURITY.md#scope-and-supported-versions)).
- > You have to provide the following information whenever possible.
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Describe what's wrong
- description: |
- * A clear and concise description of what works not as it is supposed to.
- * A link to reproducer in [https://fiddle.clickhouse.com/](https://fiddle.clickhouse.com/).
- validations:
- required: true
- - type: dropdown
- attributes:
- label: Does it reproduce on the most recent release?
- description: |
- [The list of releases](https://github.com/ClickHouse/ClickHouse/blob/master/utils/list-versions/version_date.tsv)
- options:
- - 'Yes'
- - 'No'
- validations:
- required: true
- - type: markdown
- attributes:
- value: |
- -----
- > Change "enabled" to true in "send_crash_reports" section in `config.xml`:
- ```xml
-
-
-
- false
-
- ```
- -----
- - type: textarea
- attributes:
- label: How to reproduce
- description: |
- * Which ClickHouse server version to use
- * Which interface to use, if matters
- * Non-default settings, if any
- * `CREATE TABLE` statements for all tables involved
- * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary
- * Queries to run that lead to unexpected result
- validations:
- required: true
- - type: textarea
- attributes:
- label: Expected behavior
- description: A clear and concise description of what you expected to happen.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Error message and/or stacktrace
- description: If applicable, add screenshots to help explain your problem.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Additional context
- description: Add any other context about the problem here.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/90_fuzzing-report.yaml b/.github/ISSUE_TEMPLATE/90_fuzzing-report.yaml
deleted file mode 100644
index 84dc8a372e5a..000000000000
--- a/.github/ISSUE_TEMPLATE/90_fuzzing-report.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-name: Assertion found via fuzzing
-description: Potential issue has been found via Fuzzer or Stress tests
-labels: ["fuzz"]
-body:
- - type: markdown
- attributes:
- value: |
- > (you don't have to strictly follow this form)
- - type: textarea
- attributes:
- label: Describe the bug
- description: A link to the report.
- validations:
- required: true
- - type: textarea
- attributes:
- label: How to reproduce
- description: Try to reproduce the report and copy the tables and queries involved.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Error message and/or stacktrace
- description: You can find additional information in server logs.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/95_sanitizer-report.yaml b/.github/ISSUE_TEMPLATE/95_sanitizer-report.yaml
deleted file mode 100644
index 7bb47e2b824b..000000000000
--- a/.github/ISSUE_TEMPLATE/95_sanitizer-report.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-name: Sanitizer alert
-description: Potential issue has been found by special code instrumentation
-labels: ["testing"]
-body:
- - type: markdown
- attributes:
- value: |
- > (you don't have to strictly follow this form)
- - type: textarea
- attributes:
- label: Describe the bug
- description: A link to the report.
- validations:
- required: true
- - type: textarea
- attributes:
- label: How to reproduce
- description: Try to reproduce the report and copy the tables and queries involved.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Error message and/or stacktrace
- description: You can find additional information in server logs.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/96_installation-issues.yaml b/.github/ISSUE_TEMPLATE/96_installation-issues.yaml
deleted file mode 100644
index f71f6079453e..000000000000
--- a/.github/ISSUE_TEMPLATE/96_installation-issues.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-name: Installation issue
-description: Issue with ClickHouse installation from https://clickhouse.com/docs/en/install/
-labels: ["comp-install"]
-body:
- - type: markdown
- attributes:
- value: |
- > **I have tried the following solutions**: https://clickhouse.com/docs/en/faq/troubleshooting/#troubleshooting-installation-errors
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Installation type
- description: Packages, docker, single binary, curl?
- validations:
- required: true
- - type: textarea
- attributes:
- label: Source of the ClickHouse
- description: A link to the source. Or the command you've tried.
- validations:
- required: true
- - type: textarea
- attributes:
- label: Describe the problem.
- description: What went wrong and what is the expected result?
- validations:
- required: true
- - type: textarea
- attributes:
- label: Error message and/or stacktrace
- description: You can find additional information in server logs.
- validations:
- required: false
- - type: textarea
- attributes:
- label: How to reproduce
- description: |
- * For Linux-based operating systems: provide a script for clear docker container from the official image
- * For anything else: steps to reproduce on as much as possible clear system
- validations:
- required: false
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 7a933bb4d857..d15e5a8b50e2 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -23,19 +23,4 @@ tests/ci/cancel_and_rerun_workflow_lambda/app.py
### Documentation entry for user-facing changes
-- [ ] Documentation is written (mandatory for new features)
-
diff --git a/.github/actionlint.yml b/.github/actionlint.yml
index cf5f575e3c74..904a548dadd5 100644
--- a/.github/actionlint.yml
+++ b/.github/actionlint.yml
@@ -1,9 +1,9 @@
self-hosted-runner:
labels:
- - builder
- - func-tester
- - func-tester-aarch64
+ - altinity-builder
+ - altinity-func-tester
+ - altinity-func-tester-aarch64
- fuzzer-unit-tester
- - style-checker
- - style-checker-aarch64
+ - altinity-style-checker
+ - altinity-style-checker-aarch64
- release-maker
diff --git a/.github/actions/common_setup/action.yml b/.github/actions/common_setup/action.yml
index e492fa97816d..87db4d9fa503 100644
--- a/.github/actions/common_setup/action.yml
+++ b/.github/actions/common_setup/action.yml
@@ -28,3 +28,20 @@ runs:
run: |
# to remove every leftovers
sudo rm -fr "$TEMP_PATH" && mkdir -p "$TEMP_PATH"
+ - name: Setup zram
+ shell: bash
+ run: |
+ # Check if zram is already set up
+ if ! lsmod | grep -q "^zram "; then
+ sudo modprobe zram
+ fi
+
+ # Only proceed with setup if /dev/zram0 is not already in use
+ if ! swapon -s | grep -q "/dev/zram0"; then
+ MemTotal=$(grep -Po "(?<=MemTotal:)\s+\d+" /proc/meminfo) # KiB
+ Percent=200
+ ZRAM_SIZE=$(($MemTotal / 1024 / 1024 * $Percent / 100)) # Convert to GiB
+ .github/retry.sh 30 2 sudo zramctl --size ${ZRAM_SIZE}GiB --algorithm zstd /dev/zram0
+ sudo mkswap /dev/zram0 && sudo swapon -p 100 /dev/zram0
+ sudo sysctl vm.swappiness=200
+ fi
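
For context, a brief sketch (not part of this patch) of how to inspect or undo what the step above sets up: it sizes a zstd-compressed zram device at roughly 200% of physical RAM, enables it as swap at priority 100, and raises `vm.swappiness` so the runner prefers the compressed swap under memory pressure.

```bash
# Not from the patch: inspect (and, if needed, tear down) the zram swap
# configured by the common_setup step, using standard util-linux tools.
swapon --show                     # /dev/zram0 should appear with priority 100
zramctl                           # shows the zstd algorithm and configured size
sudo swapoff /dev/zram0           # detach the swap device
sudo zramctl --reset /dev/zram0   # release the zram device again
```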
diff --git a/.github/actions/create_workflow_report/action.yml b/.github/actions/create_workflow_report/action.yml
new file mode 100644
index 000000000000..dbca19c28e37
--- /dev/null
+++ b/.github/actions/create_workflow_report/action.yml
@@ -0,0 +1,40 @@
+name: Create and Upload Combined Report
+description: Create and upload a combined CI report
+inputs:
+ final:
+ description: "Control whether the report is final or a preview"
+ required: false
+ default: "false"
+runs:
+ using: "composite"
+ steps:
+ - name: Create and upload workflow report
+ env:
+ COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ ACTIONS_RUN_URL: ${{ github.event.repository.html_url }}/actions/runs/${{ github.run_id }}
+ FINAL: ${{ inputs.final }}
+ shell: bash
+ run: |
+ pip install clickhouse-driver==0.2.8 numpy==1.26.4 pandas==2.0.3 jinja2==3.1.5
+
+ CMD="python3 .github/actions/create_workflow_report/create_workflow_report.py"
+ ARGS="--commit-sha $COMMIT_SHA --actions-run-url $ACTIONS_RUN_URL --known-fails tests/broken_tests.json --cves"
+
+ set +e -x
+ if [[ "$FINAL" == "false" ]]; then
+ REPORT_LINK=$($CMD $ARGS --mark-preview)
+ else
+ REPORT_LINK=$($CMD $ARGS)
+ fi
+
+ echo $REPORT_LINK
+
+ if [[ "$FINAL" == "true" ]]; then
+ IS_VALID_URL=$(echo $REPORT_LINK | grep -E '^https?://')
+ if [[ -n $IS_VALID_URL ]]; then
+ echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY
+ else
+ echo "Error: $REPORT_LINK" >> $GITHUB_STEP_SUMMARY
+ exit 1
+ fi
+ fi
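
The composite action above is a thin wrapper around `create_workflow_report.py` (added later in this patch). A minimal sketch of an equivalent local invocation, assuming the credentials the script reads from the environment are available; the placeholder values are illustrative, not part of the patch:

```bash
# Not from the patch: run the report generator by hand with the same flags the action uses.
export CHECKS_DATABASE_HOST=...     # ClickHouse host with the gh-data checks tables
export CHECKS_DATABASE_USER=...
export CHECKS_DATABASE_PASSWORD=...
export GITHUB_TOKEN=...             # used to fetch commit statuses and run details
export S3_URL=...                   # S3 endpoint for Grype results

# The action installs these pins; the script also imports requests and boto3.
pip install clickhouse-driver==0.2.8 numpy==1.26.4 pandas==2.0.3 jinja2==3.1.5

python3 .github/actions/create_workflow_report/create_workflow_report.py \
  --commit-sha "<commit-sha>" \
  --actions-run-url "https://github.com/Altinity/ClickHouse/actions/runs/<run-id>" \
  --known-fails tests/broken_tests.json \
  --cves \
  --mark-preview   # drop this flag to produce the final report
```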
diff --git a/.github/actions/create_workflow_report/ci_run_report.html.jinja b/.github/actions/create_workflow_report/ci_run_report.html.jinja
new file mode 100644
index 000000000000..a92c1aa34e3a
--- /dev/null
+++ b/.github/actions/create_workflow_report/ci_run_report.html.jinja
@@ -0,0 +1,269 @@
+
+
+
+
+
+
+
+
+ {{ title }}
+
+
+
+
+
+
+ {{ title }}
+
+ {% if is_preview %}
+ This is a preview. The workflow is not yet finished.
+ {% endif %}
+ Table of Contents
+
+
+ {%- if pr_number != 0 -%}
+ New Fails in PR
+ Compared with base sha {{ base_sha }}
+ {{ new_fails_html }}
+ {%- endif %}
+
+ CI Jobs Status
+ {{ ci_jobs_status_html }}
+
+ Checks Errors
+ {{ checks_errors_html }}
+
+ Checks New Fails
+ {{ checks_fails_html }}
+
+ Regression New Fails
+ {{ regression_fails_html }}
+
+ Docker Images CVEs
+ {{ docker_images_cves_html }}
+
+ Checks Known Fails
+
+ Fail reason conventions:
+ KNOWN - Accepted fail and fix is not planned
+ INVESTIGATE - We don't know why it fails
+ NEEDSFIX - Investigation done and a fix is needed to make it pass
+
+ {{ checks_known_fails_html }}
+
+
+
+
\ No newline at end of file
diff --git a/.github/actions/create_workflow_report/create_workflow_report.py b/.github/actions/create_workflow_report/create_workflow_report.py
new file mode 100755
index 000000000000..580e27ed9ec5
--- /dev/null
+++ b/.github/actions/create_workflow_report/create_workflow_report.py
@@ -0,0 +1,661 @@
+#!/usr/bin/env python3
+import argparse
+import os
+from pathlib import Path
+from itertools import combinations
+import json
+from datetime import datetime
+from functools import lru_cache
+
+import pandas as pd
+from jinja2 import Environment, FileSystemLoader
+import requests
+from clickhouse_driver import Client
+import boto3
+from botocore.exceptions import NoCredentialsError
+
+DATABASE_HOST_VAR = "CHECKS_DATABASE_HOST"
+DATABASE_USER_VAR = "CHECKS_DATABASE_USER"
+DATABASE_PASSWORD_VAR = "CHECKS_DATABASE_PASSWORD"
+S3_BUCKET = "altinity-build-artifacts"
+GITHUB_REPO = "Altinity/ClickHouse"
+
+# Set up the Jinja2 environment
+template_dir = os.path.dirname(__file__)
+
+# Load the template
+template = Environment(loader=FileSystemLoader(template_dir)).get_template(
+ "ci_run_report.html.jinja"
+)
+
+
+def get_commit_statuses(sha: str) -> pd.DataFrame:
+ """
+ Fetch commit statuses for a given SHA and return as a pandas DataFrame.
+ Handles pagination to get all statuses.
+
+ Args:
+ sha (str): Commit SHA to fetch statuses for.
+
+ Returns:
+ pd.DataFrame: DataFrame containing all statuses.
+ """
+ headers = {
+ "Authorization": f"token {os.getenv('GITHUB_TOKEN')}",
+ "Accept": "application/vnd.github.v3+json",
+ }
+
+ url = f"https://api.github.com/repos/{GITHUB_REPO}/commits/{sha}/statuses"
+
+ all_data = []
+
+ while url:
+ response = requests.get(url, headers=headers)
+
+ if response.status_code != 200:
+ raise Exception(
+ f"Failed to fetch statuses: {response.status_code} {response.text}"
+ )
+
+ data = response.json()
+ all_data.extend(data)
+
+ # Check for pagination links in the response headers
+ if "Link" in response.headers:
+ links = response.headers["Link"].split(",")
+ next_url = None
+
+ for link in links:
+ parts = link.strip().split(";")
+ if len(parts) == 2 and 'rel="next"' in parts[1]:
+ next_url = parts[0].strip("<>")
+ break
+
+ url = next_url
+ else:
+ url = None
+
+ # Parse relevant fields
+ parsed = [
+ {
+ "job_name": item["context"],
+ "job_status": item["state"],
+ "message": item["description"],
+ "results_link": item["target_url"],
+ }
+ for item in all_data
+ ]
+
+ # Create DataFrame
+ df = pd.DataFrame(parsed)
+
+ # Drop duplicates keeping the first occurrence (newest status for each context)
+ # GitHub returns statuses in reverse chronological order
+ df = df.drop_duplicates(subset=["job_name"], keep="first")
+
+ # Sort by status and job name
+ return df.sort_values(
+ by=["job_status", "job_name"], ascending=[True, True]
+ ).reset_index(drop=True)
+
+
+def get_pr_info_from_number(pr_number: str) -> dict:
+ """
+ Fetch pull request information for a given PR number.
+
+ Args:
+ pr_number (str): Pull request number to fetch information for.
+
+ Returns:
+ dict: Dictionary containing PR information.
+ """
+ headers = {
+ "Authorization": f"token {os.getenv('GITHUB_TOKEN')}",
+ "Accept": "application/vnd.github.v3+json",
+ }
+
+ url = f"https://api.github.com/repos/{GITHUB_REPO}/pulls/{pr_number}"
+ response = requests.get(url, headers=headers)
+
+ if response.status_code != 200:
+ raise Exception(
+ f"Failed to fetch pull request info: {response.status_code} {response.text}"
+ )
+
+ return response.json()
+
+
+@lru_cache
+def get_run_details(run_url: str) -> dict:
+ """
+ Fetch run details for a given run URL.
+ """
+ run_id = run_url.split("/")[-1]
+
+ headers = {
+ "Authorization": f"token {os.getenv('GITHUB_TOKEN')}",
+ "Accept": "application/vnd.github.v3+json",
+ }
+
+ url = f"https://api.github.com/repos/{GITHUB_REPO}/actions/runs/{run_id}"
+ response = requests.get(url, headers=headers)
+
+ if response.status_code != 200:
+ raise Exception(
+ f"Failed to fetch run details: {response.status_code} {response.text}"
+ )
+
+ return response.json()
+
+
+def get_checks_fails(client: Client, job_url: str):
+ """
+ Get tests that did not succeed for the given job URL.
+ Exclude checks that have status 'error' as they are counted in get_checks_errors.
+ """
+ query = f"""SELECT job_status, job_name, status as test_status, test_name, results_link
+ FROM (
+ SELECT
+ argMax(check_status, check_start_time) as job_status,
+ check_name as job_name,
+ argMax(test_status, check_start_time) as status,
+ test_name,
+ report_url as results_link,
+ task_url
+ FROM `gh-data`.checks
+ GROUP BY check_name, test_name, report_url, task_url
+ )
+ WHERE task_url LIKE '{job_url}%'
+ AND test_status IN ('FAIL', 'ERROR')
+ AND job_status!='error'
+ ORDER BY job_name, test_name
+ """
+ return client.query_dataframe(query)
+
+
+def get_checks_known_fails(client: Client, job_url: str, known_fails: dict):
+ """
+ Get tests that are known to fail for the given job URL.
+ """
+ if len(known_fails) == 0:
+ return pd.DataFrame()
+
+ query = f"""SELECT job_status, job_name, status as test_status, test_name, results_link
+ FROM (
+ SELECT
+ argMax(check_status, check_start_time) as job_status,
+ check_name as job_name,
+ argMax(test_status, check_start_time) as status,
+ test_name,
+ report_url as results_link,
+ task_url
+ FROM `gh-data`.checks
+ GROUP BY check_name, test_name, report_url, task_url
+ )
+ WHERE task_url LIKE '{job_url}%'
+ AND test_status='BROKEN'
+ AND test_name IN ({','.join(f"'{test}'" for test in known_fails.keys())})
+ ORDER BY job_name, test_name
+ """
+
+ df = client.query_dataframe(query)
+
+ df.insert(
+ len(df.columns) - 1,
+ "reason",
+ df["test_name"]
+ .astype(str)
+ .apply(
+ lambda test_name: known_fails[test_name].get("reason", "No reason given")
+ ),
+ )
+
+ return df
+
+
+def get_checks_errors(client: Client, job_url: str):
+ """
+ Get checks that have status 'error' for the given job URL.
+ """
+ query = f"""SELECT job_status, job_name, status as test_status, test_name, results_link
+ FROM (
+ SELECT
+ argMax(check_status, check_start_time) as job_status,
+ check_name as job_name,
+ argMax(test_status, check_start_time) as status,
+ test_name,
+ report_url as results_link,
+ task_url
+ FROM `gh-data`.checks
+ GROUP BY check_name, test_name, report_url, task_url
+ )
+ WHERE task_url LIKE '{job_url}%'
+ AND job_status=='error'
+ ORDER BY job_name, test_name
+ """
+ return client.query_dataframe(query)
+
+
+def drop_prefix_rows(df, column_to_clean):
+ """
+ Drop rows from the dataframe if:
+ - the row matches another row completely except for the specified column
+ - the specified column of that row is a prefix of the same column in another row
+ """
+ to_drop = set()
+ reference_columns = [col for col in df.columns if col != column_to_clean]
+ for (i, row_1), (j, row_2) in combinations(df.iterrows(), 2):
+ if all(row_1[col] == row_2[col] for col in reference_columns):
+ if row_2[column_to_clean].startswith(row_1[column_to_clean]):
+ to_drop.add(i)
+ elif row_1[column_to_clean].startswith(row_2[column_to_clean]):
+ to_drop.add(j)
+ return df.drop(to_drop)
+
+
+def get_regression_fails(client: Client, job_url: str):
+ """
+ Get regression tests that did not succeed for the given job URL.
+ """
+ # If you rename the alias for report_url, also update the formatters in format_results_as_html_table
+ # Nested SELECT handles test reruns
+ query = f"""SELECT arch, job_name, status, test_name, results_link
+ FROM (
+ SELECT
+ architecture as arch,
+ test_name,
+ argMax(result, start_time) AS status,
+ job_name,
+ report_url as results_link,
+ job_url
+ FROM `gh-data`.clickhouse_regression_results
+ GROUP BY architecture, test_name, job_url, job_name, report_url
+ ORDER BY length(test_name) DESC
+ )
+ WHERE job_url LIKE '{job_url}%'
+ AND status IN ('Fail', 'Error')
+ """
+ df = client.query_dataframe(query)
+ df = drop_prefix_rows(df, "test_name")
+ df["job_name"] = df["job_name"].str.title()
+ return df
+
+
+def get_new_fails_this_pr(
+ client: Client,
+ pr_info: dict,
+ checks_fails: pd.DataFrame,
+ regression_fails: pd.DataFrame,
+):
+ """
+ Get tests that failed in the PR but passed in the base branch.
+ Compares both checks and regression test results.
+ """
+ base_sha = pr_info.get("base", {}).get("sha")
+ if not base_sha:
+ raise Exception("No base SHA found for PR")
+
+ # Modify tables to have the same columns
+ if len(checks_fails) > 0:
+ checks_fails = checks_fails.copy().drop(columns=["job_status"])
+ if len(regression_fails) > 0:
+ regression_fails = regression_fails.copy()
+ regression_fails["job_name"] = regression_fails.apply(
+ lambda row: f"{row['arch']} {row['job_name']}".strip(), axis=1
+ )
+ regression_fails["test_status"] = regression_fails["status"]
+
+ # Combine both types of fails and select only desired columns
+ desired_columns = ["job_name", "test_name", "test_status", "results_link"]
+ all_pr_fails = pd.concat([checks_fails, regression_fails], ignore_index=True)[
+ desired_columns
+ ]
+ if len(all_pr_fails) == 0:
+ return pd.DataFrame()
+
+ # Get all checks from the base branch that didn't fail
+ base_checks_query = f"""SELECT job_name, status as test_status, test_name, results_link
+ FROM (
+ SELECT
+ check_name as job_name,
+ argMax(test_status, check_start_time) as status,
+ test_name,
+ report_url as results_link,
+ task_url
+ FROM `gh-data`.checks
+ WHERE commit_sha='{base_sha}'
+ GROUP BY check_name, test_name, report_url, task_url
+ )
+ WHERE test_status NOT IN ('FAIL', 'ERROR')
+ ORDER BY job_name, test_name
+ """
+ base_checks = client.query_dataframe(base_checks_query)
+
+ # Get regression results from base branch that didn't fail
+ base_regression_query = f"""SELECT arch, job_name, status, test_name, results_link
+ FROM (
+ SELECT
+ architecture as arch,
+ test_name,
+ argMax(result, start_time) AS status,
+ job_url,
+ job_name,
+ report_url as results_link
+ FROM `gh-data`.clickhouse_regression_results
+ WHERE results_link LIKE '%/{base_sha}/%'
+ GROUP BY architecture, test_name, job_url, job_name, report_url
+ ORDER BY length(test_name) DESC
+ )
+ WHERE status NOT IN ('Fail', 'Error')
+ """
+ base_regression = client.query_dataframe(base_regression_query)
+ if len(base_regression) > 0:
+ base_regression["job_name"] = base_regression.apply(
+ lambda row: f"{row['arch']} {row['job_name']}".strip(), axis=1
+ )
+ base_regression["test_status"] = base_regression["status"]
+ base_regression = base_regression.drop(columns=["arch", "status"])
+
+ # Combine base results
+ base_results = pd.concat([base_checks, base_regression], ignore_index=True)
+
+ # Find tests that failed in PR but passed in base
+ pr_failed_tests = set(zip(all_pr_fails["job_name"], all_pr_fails["test_name"]))
+ base_passed_tests = set(zip(base_results["job_name"], base_results["test_name"]))
+
+ new_fails = pr_failed_tests.intersection(base_passed_tests)
+
+ # Filter PR results to only include new fails
+ mask = all_pr_fails.apply(
+ lambda row: (row["job_name"], row["test_name"]) in new_fails, axis=1
+ )
+ new_fails_df = all_pr_fails[mask]
+
+ return new_fails_df
+
+
+def get_cves(pr_number, commit_sha):
+ """
+ Fetch Grype results from S3.
+
+ If no results are available for download, returns ... (Ellipsis).
+ """
+ s3_client = boto3.client("s3", endpoint_url=os.getenv("S3_URL"))
+ s3_prefix = f"{pr_number}/{commit_sha}/grype/"
+
+ results = []
+
+ response = s3_client.list_objects_v2(
+ Bucket=S3_BUCKET, Prefix=s3_prefix, Delimiter="/"
+ )
+ grype_result_dirs = [
+ content["Prefix"] for content in response.get("CommonPrefixes", [])
+ ]
+
+ if len(grype_result_dirs) == 0:
+ # We were asked to check the CVE data, but none was found,
+ # maybe this is a preview report and grype results are not available yet
+ return ...
+
+ for path in grype_result_dirs:
+ file_key = f"{path}result.json"
+ file_response = s3_client.get_object(Bucket=S3_BUCKET, Key=file_key)
+ content = file_response["Body"].read().decode("utf-8")
+ results.append(json.loads(content))
+
+ rows = []
+ for scan_result in results:
+ for match in scan_result["matches"]:
+ rows.append(
+ {
+ "docker_image": scan_result["source"]["target"]["userInput"],
+ "severity": match["vulnerability"]["severity"],
+ "identifier": match["vulnerability"]["id"],
+ "namespace": match["vulnerability"]["namespace"],
+ }
+ )
+
+ if len(rows) == 0:
+ return pd.DataFrame()
+
+ df = pd.DataFrame(rows).drop_duplicates()
+ df = df.sort_values(
+ by="severity",
+ key=lambda col: col.str.lower().map(
+ {"critical": 1, "high": 2, "medium": 3, "low": 4, "negligible": 5}
+ ),
+ )
+ return df
+
+
+def url_to_html_link(url: str) -> str:
+ if not url:
+ return ""
+ text = url.split("/")[-1].replace("__", "_")
+ if not text:
+ text = "results"
+ return f'<a href="{url}">{text}</a>'
+
+
+def format_test_name_for_linewrap(text: str) -> str:
+ """Tweak the test name to improve line wrapping."""
+ return f'<span style="word-break: break-all">{text}</span>'
+
+
+def format_test_status(text: str) -> str:
+ """Format the test status for better readability."""
+ color = (
+ "red"
+ if text.lower().startswith("fail")
+ else "orange" if text.lower() in ("error", "broken") else "green"
+ )
+ return f'<span style="color: {color}">{text}</span>'
+
+
+def format_results_as_html_table(results) -> str:
+ if len(results) == 0:
+ return "Nothing to report
"
+ results.columns = [col.replace("_", " ").title() for col in results.columns]
+ html = results.to_html(
+ index=False,
+ formatters={
+ "Results Link": url_to_html_link,
+ "Test Name": format_test_name_for_linewrap,
+ "Test Status": format_test_status,
+ "Job Status": format_test_status,
+ "Status": format_test_status,
+ "Message": lambda m: m.replace("\n", " "),
+ "Identifier": lambda i: url_to_html_link(
+ "https://nvd.nist.gov/vuln/detail/" + i
+ ),
+ },
+ escape=False,
+ border=0,
+ classes=["test-results-table"],
+ )
+ return html
+
+
+def parse_args() -> argparse.Namespace:
+ parser = argparse.ArgumentParser(description="Create a combined CI report.")
+ parser.add_argument( # Need the full URL rather than just the ID to query the databases
+ "--actions-run-url", required=True, help="URL of the actions run"
+ )
+ parser.add_argument(
+ "--pr-number", help="Pull request number for the S3 path", type=int
+ )
+ parser.add_argument("--commit-sha", help="Commit SHA for the S3 path")
+ parser.add_argument(
+ "--no-upload", action="store_true", help="Do not upload the report"
+ )
+ parser.add_argument(
+ "--known-fails", type=str, help="Path to the file with known fails"
+ )
+ parser.add_argument(
+ "--cves", action="store_true", help="Get CVEs from Grype results"
+ )
+ parser.add_argument(
+ "--mark-preview", action="store_true", help="Mark the report as a preview"
+ )
+ return parser.parse_args()
+
+
+def main():
+ args = parse_args()
+
+ if args.pr_number is None or args.commit_sha is None:
+ run_details = get_run_details(args.actions_run_url)
+ if args.pr_number is None:
+ if len(run_details["pull_requests"]) > 0:
+ args.pr_number = run_details["pull_requests"][0]["number"]
+ else:
+ args.pr_number = 0
+ if args.commit_sha is None:
+ args.commit_sha = run_details["head_commit"]["id"]
+
+ db_client = Client(
+ host=os.getenv(DATABASE_HOST_VAR),
+ user=os.getenv(DATABASE_USER_VAR),
+ password=os.getenv(DATABASE_PASSWORD_VAR),
+ port=9440,
+ secure="y",
+ verify=False,
+ settings={"use_numpy": True},
+ )
+
+ fail_results = {
+ "job_statuses": get_commit_statuses(args.commit_sha),
+ "checks_fails": get_checks_fails(db_client, args.actions_run_url),
+ "checks_known_fails": [],
+ "pr_new_fails": [],
+ "checks_errors": get_checks_errors(db_client, args.actions_run_url),
+ "regression_fails": get_regression_fails(db_client, args.actions_run_url),
+ "docker_images_cves": (
+ [] if not args.cves else get_cves(args.pr_number, args.commit_sha)
+ ),
+ }
+
+ # get_cves returns ... in the case where no Grype result files were found.
+ # This might occur when run in preview mode.
+ cves_not_checked = not args.cves or fail_results["docker_images_cves"] is ...
+
+ if args.known_fails:
+ if not os.path.exists(args.known_fails):
+ print(f"Known fails file {args.known_fails} not found.")
+ exit(1)
+
+ with open(args.known_fails) as f:
+ known_fails = json.load(f)
+
+ if known_fails:
+ fail_results["checks_known_fails"] = get_checks_known_fails(
+ db_client, args.actions_run_url, known_fails
+ )
+
+ if args.pr_number == 0:
+ run_details = get_run_details(args.actions_run_url)
+ branch_name = run_details.get("head_branch", "unknown branch")
+ pr_info_html = f"Release ({branch_name})"
+ else:
+ try:
+ pr_info = get_pr_info_from_number(args.pr_number)
+ pr_info_html = f"""
+ #{pr_info.get("number")} ({pr_info.get("base", {}).get('ref')} <- {pr_info.get("head", {}).get('ref')}) {pr_info.get("title")}
+ """
+ fail_results["pr_new_fails"] = get_new_fails_this_pr(
+ db_client,
+ pr_info,
+ fail_results["checks_fails"],
+ fail_results["regression_fails"],
+ )
+ except Exception as e:
+ pr_info_html = e
+
+ high_cve_count = 0
+ if not cves_not_checked and len(fail_results["docker_images_cves"]) > 0:
+ high_cve_count = (
+ fail_results["docker_images_cves"]["severity"]
+ .str.lower()
+ .isin(("high", "critical"))
+ .sum()
+ )
+
+ # Define the context for rendering
+ context = {
+ "title": "ClickHouse® CI Workflow Run Report",
+ "github_repo": GITHUB_REPO,
+ "s3_bucket": S3_BUCKET,
+ "pr_info_html": pr_info_html,
+ "pr_number": args.pr_number,
+ "workflow_id": args.actions_run_url.split("/")[-1],
+ "commit_sha": args.commit_sha,
+ "base_sha": "" if args.pr_number == 0 else pr_info.get("base", {}).get("sha"),
+ "date": f"{datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')} UTC",
+ "is_preview": args.mark_preview,
+ "counts": {
+ "jobs_status": f"{sum(fail_results['job_statuses']['job_status'] != 'success')} fail/error",
+ "checks_errors": len(fail_results["checks_errors"]),
+ "checks_new_fails": len(fail_results["checks_fails"]),
+ "regression_new_fails": len(fail_results["regression_fails"]),
+ "cves": "N/A" if cves_not_checked else f"{high_cve_count} high/critical",
+ "checks_known_fails": (
+ "N/A"
+ if not args.known_fails
+ else len(fail_results["checks_known_fails"])
+ ),
+ "pr_new_fails": len(fail_results["pr_new_fails"]),
+ },
+ "ci_jobs_status_html": format_results_as_html_table(
+ fail_results["job_statuses"]
+ ),
+ "checks_errors_html": format_results_as_html_table(
+ fail_results["checks_errors"]
+ ),
+ "checks_fails_html": format_results_as_html_table(fail_results["checks_fails"]),
+ "regression_fails_html": format_results_as_html_table(
+ fail_results["regression_fails"]
+ ),
+ "docker_images_cves_html": (
+ "Not Checked
"
+ if cves_not_checked
+ else format_results_as_html_table(fail_results["docker_images_cves"])
+ ),
+ "checks_known_fails_html": (
+ "Not Checked
"
+ if not args.known_fails
+ else format_results_as_html_table(fail_results["checks_known_fails"])
+ ),
+ "new_fails_html": format_results_as_html_table(fail_results["pr_new_fails"]),
+ }
+
+ # Render the template with the context
+ rendered_html = template.render(context)
+
+ report_name = "ci_run_report.html"
+ report_path = Path(report_name)
+ report_path.write_text(rendered_html, encoding="utf-8")
+
+ if args.no_upload:
+ print(f"Report saved to {report_path}")
+ exit(0)
+
+ report_destination_key = f"{args.pr_number}/{args.commit_sha}/{report_name}"
+
+ # Upload the report to S3
+ s3_client = boto3.client("s3", endpoint_url=os.getenv("S3_URL"))
+
+ try:
+ s3_client.put_object(
+ Bucket=S3_BUCKET,
+ Key=report_destination_key,
+ Body=rendered_html,
+ ContentType="text/html; charset=utf-8",
+ )
+ except NoCredentialsError:
+ print("Credentials not available for S3 upload.")
+
+ print(f"https://s3.amazonaws.com/{S3_BUCKET}/" + report_destination_key)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.github/actions/docker_setup/action.yml b/.github/actions/docker_setup/action.yml
new file mode 100644
index 000000000000..60c9a17519a6
--- /dev/null
+++ b/.github/actions/docker_setup/action.yml
@@ -0,0 +1,31 @@
+name: Docker setup
+description: Setup docker
+inputs:
+ test_name:
+ description: name of the test, used in determining ipv6 configs.
+ default: None
+ type: string
+runs:
+ using: "composite"
+ steps:
+ - name: Docker IPv6 configuration
+ shell: bash
+ env:
+ ipv6_subnet: ${{ contains(inputs.test_name, 'Integration') && '2001:db8:1::/64' || '2001:3984:3989::/64' }}
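+ # Integration tests get the 2001:db8:1::/64 subnet; all other jobs use 2001:3984:3989::/64.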
+ run: |
+ # make sure docker uses proper IPv6 config
+ sudo touch /etc/docker/daemon.json
+ sudo chown ubuntu:ubuntu /etc/docker/daemon.json
+ sudo cat <<EOT > /etc/docker/daemon.json
+ {
+ "ipv6": true,
+ "fixed-cidr-v6": "${{ env.ipv6_subnet }}"
+ }
+ EOT
+ sudo chown root:root /etc/docker/daemon.json
+ sudo systemctl restart docker
+ sudo systemctl status docker
+ - name: Docker info
+ shell: bash
+ run: |
+ docker info
diff --git a/.github/grype/parse_vulnerabilities_grype.py b/.github/grype/parse_vulnerabilities_grype.py
new file mode 100644
index 000000000000..fec2ef3bfac7
--- /dev/null
+++ b/.github/grype/parse_vulnerabilities_grype.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python3
+import json
+
+from testflows.core import *
+
+xfails = {}
+
+
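+# Reads ./result.json (the Grype JSON report, e.g. as produced by run_grype_scan.sh)
+# and records every match as a testflows test; HIGH/CRITICAL findings are reported as failures.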
+@Name("docker vulnerabilities")
+@XFails(xfails)
+@TestModule
+def docker_vulnerabilities(self):
+ with Given("I gather grype scan results"):
+ with open("./result.json", "r") as f:
+ results = json.load(f)
+
+ for vulnerability in results["matches"]:
+ with Test(
+ f"{vulnerability['vulnerability']['id']}@{vulnerability['vulnerability']['namespace']},{vulnerability['vulnerability']['severity']}",
+ flags=TE,
+ ):
+ note(vulnerability)
+ critical_levels = set(["HIGH", "CRITICAL"])
+ if vulnerability['vulnerability']["severity"].upper() in critical_levels:
+ with Then(
+ f"Found vulnerability of {vulnerability['vulnerability']['severity']} severity"
+ ):
+ result(Fail)
+
+
+if main():
+ docker_vulnerabilities()
diff --git a/.github/grype/run_grype_scan.sh b/.github/grype/run_grype_scan.sh
new file mode 100755
index 000000000000..af428e37d669
--- /dev/null
+++ b/.github/grype/run_grype_scan.sh
@@ -0,0 +1,18 @@
+set -x
+set -e
+
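+# Usage: run_grype_scan.sh <docker-image>
+# Writes the Grype JSON report for the image to ./result.json.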
+IMAGE=$1
+
+GRYPE_VERSION=${GRYPE_VERSION:-"v0.92.2"}
+
+docker pull $IMAGE
+docker pull anchore/grype:${GRYPE_VERSION}
+
+docker run \
+ --rm --volume /var/run/docker.sock:/var/run/docker.sock \
+ --name Grype anchore/grype:${GRYPE_VERSION} \
+ --scope all-layers \
+ -o json \
+ $IMAGE > result.json
+
+ls -sh
diff --git a/.github/grype/transform_and_upload_results_s3.sh b/.github/grype/transform_and_upload_results_s3.sh
new file mode 100755
index 000000000000..7a10b02887ef
--- /dev/null
+++ b/.github/grype/transform_and_upload_results_s3.sh
@@ -0,0 +1,13 @@
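+# Expects DOCKER_IMAGE, S3_BUCKET, PR_NUMBER and COMMIT_SHA in the environment;
+# uploads the scan artifacts (nice.log.txt, results.html, raw.log, result.json) to S3.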
+DOCKER_IMAGE=$(echo "$DOCKER_IMAGE" | sed 's/[\/:]/_/g')
+
+S3_PATH="s3://$S3_BUCKET/$PR_NUMBER/$COMMIT_SHA/grype/$DOCKER_IMAGE"
+HTTPS_S3_PATH="https://s3.amazonaws.com/$S3_BUCKET/$PR_NUMBER/$COMMIT_SHA/grype/$DOCKER_IMAGE"
+echo "https_s3_path=$HTTPS_S3_PATH" >> $GITHUB_OUTPUT
+
+tfs --no-colors transform nice raw.log nice.log.txt
+tfs --no-colors report results -a $HTTPS_S3_PATH raw.log - --copyright "Altinity LTD" | tfs --no-colors document convert > results.html
+
+aws s3 cp --no-progress nice.log.txt $S3_PATH/nice.log.txt --content-type "text/plain; charset=utf-8" || echo "nice log file not found".
+aws s3 cp --no-progress results.html $S3_PATH/results.html || echo "results file not found".
+aws s3 cp --no-progress raw.log $S3_PATH/raw.log || echo "raw.log file not found".
+aws s3 cp --no-progress result.json $S3_PATH/result.json --content-type "text/plain; charset=utf-8" || echo "result.json not found".
\ No newline at end of file
diff --git a/.github/retry.sh b/.github/retry.sh
new file mode 100755
index 000000000000..566c2cf11315
--- /dev/null
+++ b/.github/retry.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# Execute command until exitcode is 0 or
+# maximum number of retries is reached
+# Example:
+# ./retry.sh <retries> <delay> <command>
+retries=$1
+delay=$2
+command="${@:3}"
+exitcode=0
+try=0
+until [ "$try" -ge $retries ]
+do
+ echo "$command"
+ eval "$command"
+ exitcode=$?
+ if [ $exitcode -eq 0 ]; then
+ break
+ fi
+ try=$((try+1))
+ sleep $2
+done
+exit $exitcode
diff --git a/.github/workflows/README.md b/.github/workflows/README.md
new file mode 100644
index 000000000000..56415c2a7478
--- /dev/null
+++ b/.github/workflows/README.md
@@ -0,0 +1,13 @@
+## Scheduled Build Run Results
+
+Results for **the latest** scheduled runs of the release workflow (`release_branches.yml`).
+
+| Branch | Status |
+| ------------ | - |
+| **`antalya`** | [![Build status](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=antalya)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Aantalya) |
+| **`project-antalya-24.12.2`** | [![Build status](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=project-antalya-24.12.2)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Aproject-antalya-24.12.2) |
+| **`customizations/22.8.21`** | [![Build status](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations/22.8.21)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/22.8.21) |
+| **`customizations/23.3.19`** | [![Build status](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations/23.3.19)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/23.3.19) |
+| **`customizations/23.8.16`** | [![Build status](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations/23.8.16)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/23.8.16) |
+| **`customizations/24.3.14`** | [![Build status](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations/24.3.14)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/24.3.14) |
+| **`customizations/24.8.11`** | [![Build status](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations/24.8.11)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/24.8.11) |
diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml
index 1f3f219946f7..3a819abbdbd5 100644
--- a/.github/workflows/backport_branches.yml
+++ b/.github/workflows/backport_branches.yml
@@ -1,1196 +1,281 @@
-# generated by praktika
-
+# yamllint disable rule:comments-indentation
name: BackportPR
-on:
- pull_request:
- branches: ['2[1-9].[1-9][0-9]', '2[1-9].[1-9]']
-
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
- DISABLE_CI_MERGE_COMMIT: ${{ vars.DISABLE_CI_MERGE_COMMIT || '0' }}
- DISABLE_CI_CACHE: ${{ vars.DISABLE_CI_CACHE || '0' }}
- CHECKOUT_REF: ${{ vars.DISABLE_CI_MERGE_COMMIT == '1' && github.event.pull_request.head.sha || '' }}
-# Allow updating GH commit statuses and PR comments to post an actual job reports link
-permissions: write-all
+on: # yamllint disable-line rule:truthy
+ push:
+ branches:
+ - 'backport/**'
-jobs:
+# Cancel the previous wf run in PRs.
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
- config_workflow:
+jobs:
+ RunConfig:
runs-on: [self-hosted, style-checker-aarch64]
- needs: []
- name: "Config Workflow"
outputs:
- data: ${{ steps.run.outputs.DATA }}
+ data: ${{ steps.runconfig.outputs.CI_DATA }}
steps:
- - name: Checkout code
- uses: actions/checkout@v4
+ - name: Check out repository code
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f
with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Config Workflow' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Config Workflow' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- dockers_build_arm:
+ clear-repository: true # to ensure correct digests
+ fetch-depth: 0 # to get version
+ filter: tree:0
+ - name: Debug Info
+ uses: ./.github/actions/debug
+ - name: Labels check
+ run: |
+ cd "$GITHUB_WORKSPACE/tests/ci"
+ python3 run_check.py
+ - name: Python unit tests
+ run: |
+ cd "$GITHUB_WORKSPACE/tests/ci"
+ echo "Testing the main ci directory"
+ python3 -m unittest discover -s . -p 'test_*.py'
+ - name: PrepareRunConfig
+ id: runconfig
+ run: |
+ echo "::group::configure CI run"
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json
+ echo "::endgroup::"
+
+ echo "::group::CI run configure results"
+ python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
+ echo "::endgroup::"
+
+ {
+ echo 'CI_DATA<<EOF'
+ cat ${{ runner.temp }}/ci_run_data.json
+ echo 'EOF'
+ } >> "$GITHUB_OUTPUT"
+ - name: Re-create GH statuses for skipped jobs if any
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses
+ BuildDockers:
+ needs: [RunConfig]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/docker_test_images.yml
+ with:
+ data: ${{ needs.RunConfig.outputs.data }}
+ CompatibilityCheckX86:
+ needs: [RunConfig, BuilderDebRelease]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ with:
+ test_name: Compatibility check (release)
+ runner_type: style-checker
+ data: ${{ needs.RunConfig.outputs.data }}
+ CompatibilityCheckAarch64:
+ needs: [RunConfig, BuilderDebAarch64]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ with:
+ test_name: Compatibility check (aarch64)
+ runner_type: style-checker
+ data: ${{ needs.RunConfig.outputs.data }}
+#########################################################################################
+#################################### ORDINARY BUILDS ####################################
+#########################################################################################
+ BuilderDebRelease:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ with:
+ build_name: package_release
+ checkout_depth: 0
+ data: ${{ needs.RunConfig.outputs.data }}
+ BuilderDebAarch64:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ with:
+ build_name: package_aarch64
+ checkout_depth: 0
+ data: ${{ needs.RunConfig.outputs.data }}
+ runner_type: builder-aarch64
+ BuilderDebAsan:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ with:
+ build_name: package_asan
+ data: ${{ needs.RunConfig.outputs.data }}
+ BuilderDebTsan:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ with:
+ build_name: package_tsan
+ data: ${{ needs.RunConfig.outputs.data }}
+ BuilderDebDebug:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ with:
+ build_name: package_debug
+ data: ${{ needs.RunConfig.outputs.data }}
+ BuilderBinDarwin:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ with:
+ build_name: binary_darwin
+ data: ${{ needs.RunConfig.outputs.data }}
+ checkout_depth: 0
+ BuilderBinDarwinAarch64:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ with:
+ build_name: binary_darwin_aarch64
+ data: ${{ needs.RunConfig.outputs.data }}
+ checkout_depth: 0
+ runner_type: builder-aarch64
+############################################################################################
+##################################### Docker images #######################################
+############################################################################################
+ DockerServerImage:
+ needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ with:
+ test_name: Docker server image
+ runner_type: style-checker
+ data: ${{ needs.RunConfig.outputs.data }}
+ DockerKeeperImage:
+ needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ with:
+ test_name: Docker keeper image
+ runner_type: style-checker
+ data: ${{ needs.RunConfig.outputs.data }}
+############################################################################################
+##################################### BUILD REPORTER #######################################
+############################################################################################
+ Builds_Report:
+ # run report check for failed builds to indicate the CI error
+ if: ${{ !cancelled() && needs.RunConfig.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Builds') }}
+ needs: [RunConfig, BuilderDebAarch64, BuilderDebAsan, BuilderDebDebug, BuilderDebRelease, BuilderDebTsan, BuilderBinDarwin, BuilderBinDarwinAarch64]
runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }}
- name: "Dockers Build (arm)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Dockers Build (arm)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Dockers Build (arm)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- dockers_build_amd_and_merge:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_arm]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKSBhbmQgTWVyZ2U=') }}
- name: "Dockers Build (amd) and Merge"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Dockers Build (amd) and Merge' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Dockers Build (amd) and Merge' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_debug:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }}
- name: "Build (amd_debug)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_debug)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_debug)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_release:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }}
- name: "Build (amd_release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_release)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_release)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_asan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9hc2FuKQ==') }}
- name: "Build (amd_asan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_asan)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_asan)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_tsan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF90c2FuKQ==') }}
- name: "Build (amd_tsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_tsan)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_tsan)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_arm_release:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9yZWxlYXNlKQ==') }}
- name: "Build (arm_release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_release)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (arm_release)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_darwin:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kYXJ3aW4p') }}
- name: "Build (amd_darwin)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_darwin)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_darwin)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_arm_darwin:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9kYXJ3aW4p') }}
- name: "Build (arm_darwin)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_darwin)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (arm_darwin)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- docker_server_image:
+ - name: Check out repository code
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f
+ - name: Download reports
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(needs.RunConfig.outputs.data) }} --pre --job-name Builds
+ - name: Builds report
+ run: |
+ cd "$GITHUB_WORKSPACE/tests/ci"
+ python3 ./build_report_check.py --reports package_release package_aarch64 package_asan package_tsan package_debug binary_darwin binary_darwin_aarch64
+ - name: Set status
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(needs.RunConfig.outputs.data) }} --post --job-name Builds
+############################################################################################
+#################################### INSTALL PACKAGES ######################################
+############################################################################################
+ InstallPackagesTestRelease:
+ needs: [RunConfig, BuilderDebRelease]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ with:
+ test_name: Install packages (release)
+ runner_type: style-checker
+ data: ${{ needs.RunConfig.outputs.data }}
+ run_command: |
+ python3 install_check.py "$CHECK_NAME"
+ InstallPackagesTestAarch64:
+ needs: [RunConfig, BuilderDebAarch64]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ with:
+ test_name: Install packages (aarch64)
+ runner_type: altinity-on-demand, altinity-type-cax41, altinity-in-hel1, altinity-image-arm-app-docker-ce
+ data: ${{ needs.RunConfig.outputs.data }}
+ run_command: |
+ python3 install_check.py "$CHECK_NAME"
+##############################################################################################
+########################### FUNCTIONAl STATELESS TESTS #######################################
+##############################################################################################
+ FunctionalStatelessTestAsan:
+ needs: [RunConfig, BuilderDebAsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ with:
+ test_name: Stateless tests (asan)
+ runner_type: func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+##############################################################################################
+######################################### STRESS TESTS #######################################
+##############################################################################################
+ StressTestTsan:
+ needs: [RunConfig, BuilderDebTsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ with:
+ test_name: Stress test (tsan)
+ runner_type: altinity-on-demand, altinity-type-cpx51, altinity-in-ash, altinity-image-x86-system-ubuntu-22.04
+ data: ${{ needs.RunConfig.outputs.data }}
+#############################################################################################
+############################# INTEGRATION TESTS #############################################
+#############################################################################################
+ IntegrationTestsAsanOldAnalyzer:
+ needs: [RunConfig, BuilderDebAsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ with:
+ test_name: Integration tests (asan, old analyzer)
+ runner_type: altinity-on-demand, altinity-type-cpx51, altinity-in-ash, altinity-image-x86-system-ubuntu-22.04
+ data: ${{ needs.RunConfig.outputs.data }}
+ IntegrationTestsTsan:
+ needs: [RunConfig, BuilderDebTsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ with:
+ test_name: Integration tests (tsan)
+ runner_type: altinity-on-demand, altinity-type-cpx51, altinity-in-ash, altinity-image-x86-system-ubuntu-22.04
+ data: ${{ needs.RunConfig.outputs.data }}
+ FinishCheck:
+ if: ${{ !cancelled() }}
+ needs:
+ - RunConfig
+ - Builds_Report
+ - FunctionalStatelessTestAsan
+ - StressTestTsan
+ - IntegrationTestsTsan
+ - IntegrationTestsAsanOldAnalyzer
+ - CompatibilityCheckX86
+ - CompatibilityCheckAarch64
runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_release, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }}
- name: "Docker server image"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
steps:
- - name: Checkout code
- uses: actions/checkout@v4
+ - name: Check out repository code
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f
with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
+ clear-repository: true
+ - name: Finish label
+ if: ${{ !failure() }}
run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
+ export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+ cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
${{ toJson(needs) }}
EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Docker server image' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Docker server image' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- docker_keeper_image:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_release, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }}
- name: "Docker keeper image"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
+ cd "$GITHUB_WORKSPACE/tests/ci"
+ # update mergeable check
+ python3 merge_pr.py --set-ci-status
+ # update overall ci report
+ python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+ python3 merge_pr.py
+ - name: Check Workflow results
+ if: ${{ !cancelled() }}
+ run: |
+ export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+ cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
${{ toJson(needs) }}
EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Docker keeper image' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Docker keeper image' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- install_packages_release:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAocmVsZWFzZSk=') }}
- name: "Install packages (release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Install packages (release)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Install packages (release)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- install_packages_aarch64:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow, dockers_build_amd_and_merge, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYWFyY2g2NCk=') }}
- name: "Install packages (aarch64)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Install packages (aarch64)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Install packages (aarch64)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- compatibility_check_release:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAocmVsZWFzZSk=') }}
- name: "Compatibility check (release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Compatibility check (release)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Compatibility check (release)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- compatibility_check_aarch64:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow, dockers_build_amd_and_merge, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYWFyY2g2NCk=') }}
- name: "Compatibility check (aarch64)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Compatibility check (aarch64)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Compatibility check (aarch64)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_asan_1_2:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhc2FuLCAxLzIp') }}
- name: "Stateless tests (asan, 1/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (asan, 1/2)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (asan, 1/2)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_asan_2_2:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhc2FuLCAyLzIp') }}
- name: "Stateless tests (asan, 2/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (asan, 2/2)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (asan, 2/2)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_tsan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKHRzYW4p') }}
- name: "Stress test (tsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (tsan)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (tsan)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_1_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgMS82KQ==') }}
- name: "Integration tests (asan, old analyzer, 1/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 1/6)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 1/6)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_2_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgMi82KQ==') }}
- name: "Integration tests (asan, old analyzer, 2/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 2/6)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 2/6)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_3_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgMy82KQ==') }}
- name: "Integration tests (asan, old analyzer, 3/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 3/6)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 3/6)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_4_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgNC82KQ==') }}
- name: "Integration tests (asan, old analyzer, 4/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 4/6)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 4/6)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_5_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgNS82KQ==') }}
- name: "Integration tests (asan, old analyzer, 5/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 5/6)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 5/6)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_6_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgNi82KQ==') }}
- name: "Integration tests (asan, old analyzer, 6/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 6/6)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 6/6)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_1_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDEvNik=') }}
- name: "Integration tests (tsan, 1/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 1/6)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 1/6)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_2_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDIvNik=') }}
- name: "Integration tests (tsan, 2/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 2/6)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 2/6)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_3_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDMvNik=') }}
- name: "Integration tests (tsan, 3/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 3/6)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 3/6)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_4_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDQvNik=') }}
- name: "Integration tests (tsan, 4/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 4/6)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 4/6)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_5_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDUvNik=') }}
- name: "Integration tests (tsan, 5/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 5/6)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 5/6)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_6_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDYvNik=') }}
- name: "Integration tests (tsan, 6/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 6/6)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 6/6)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
+ python3 ./tests/ci/ci_buddy.py --check-wf-status
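
Note for readers skimming the praktika-generated jobs removed above: each job's `if:` guard looks up the base64-encoded job name inside `cache_success_base64` to decide whether the job already succeeded in the CI cache and may be skipped. A minimal illustrative sketch of that lookup (the file path, JSON layout, and helper name are assumptions, not taken from this patch):

```python
# Illustrative only: reproduce the cache gate used by the generated jobs above.
# The job name is base64-encoded and searched for in cache_success_base64.
import base64
import json


def is_cached_success(workflow_config_path: str, job_name: str) -> bool:
    """Return True if the job is already recorded as a cached success."""
    with open(workflow_config_path) as f:
        config = json.load(f)
    encoded = base64.b64encode(job_name.encode()).decode()
    return encoded in config.get("cache_success_base64", "")


if __name__ == "__main__":
    print(base64.b64encode(b"Integration tests (tsan, 1/6)").decode())
    # matches the literal used in the integration_tests_tsan_1_6 condition above
```
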
diff --git a/.github/workflows/cancel.yml b/.github/workflows/cancel.yml
new file mode 100644
index 000000000000..46ff5794b5ba
--- /dev/null
+++ b/.github/workflows/cancel.yml
@@ -0,0 +1,19 @@
+name: Cancel
+
+env:
+ # Force the stdout and stderr streams to be unbuffered
+ PYTHONUNBUFFERED: 1
+
+on: # yamllint disable-line rule:truthy
+ workflow_run:
+ workflows: ["PullRequestCI", "ReleaseBranchCI", "DocsCheck", "BackportPR"]
+ types:
+ - requested
+jobs:
+ cancel:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: styfle/cancel-workflow-action@0.9.1
+ with:
+ all_but_latest: true
+ workflow_id: ${{ github.event.workflow.id }}
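
cancel.yml reacts to a `requested` run of the listed workflows and relies on styfle/cancel-workflow-action with `all_but_latest: true`, which keeps the newest run of the given workflow and cancels the rest. A hedged sketch of the equivalent GitHub REST calls (not the action's actual source; it additionally handles pagination and branch filtering):

```python
# Rough equivalent of styfle/cancel-workflow-action@0.9.1 with all_but_latest: true.
import requests

API = "https://api.github.com"


def cancel_all_but_latest(owner: str, repo: str, workflow_id: int, token: str) -> None:
    headers = {"Authorization": f"token {token}", "Accept": "application/vnd.github+json"}
    resp = requests.get(
        f"{API}/repos/{owner}/{repo}/actions/workflows/{workflow_id}/runs",
        headers=headers,
        params={"per_page": 100},
    )
    runs = [r for r in resp.json()["workflow_runs"] if r["status"] != "completed"]
    # Keep the newest pending/in-progress run, cancel everything older.
    for run in sorted(runs, key=lambda r: r["created_at"], reverse=True)[1:]:
        requests.post(f"{API}/repos/{owner}/{repo}/actions/runs/{run['id']}/cancel", headers=headers)
```
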
diff --git a/.github/workflows/cherry_pick.yml b/.github/workflows/cherry_pick.yml
index 315673d4abcc..8e5191eb33cc 100644
--- a/.github/workflows/cherry_pick.yml
+++ b/.github/workflows/cherry_pick.yml
@@ -28,7 +28,7 @@ jobs:
REPO_TEAM=core
EOF
- name: Check out repository code
- uses: ClickHouse/checkout@v1
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
with:
clear-repository: true
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
diff --git a/.github/workflows/compare_fails.yml b/.github/workflows/compare_fails.yml
new file mode 100644
index 000000000000..5dbfb9776a47
--- /dev/null
+++ b/.github/workflows/compare_fails.yml
@@ -0,0 +1,104 @@
+name: Compare CI Failures
+
+on:
+ workflow_dispatch:
+ inputs:
+ current_ref:
+ description: 'Current reference (commit hash or git tag) (default: current commit on selected branch)'
+ required: false
+ type: string
+ previous_ref:
+ description: 'Previous reference to compare with (commit hash, git tag or workflow url) (default: previous stable tag for current reference)'
+ required: false
+ type: string
+ upstream_ref:
+ description: 'Upstream reference to compare with (commit hash, git tag or MAJOR.MINOR version) (default: previous lts tag for current reference)'
+ required: false
+ type: string
+ include_broken:
+ description: 'Include BROKEN tests in comparison'
+ required: false
+ type: boolean
+ default: false
+ push:
+ tags:
+ - 'v*.altinity*'
+
+env:
+ CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }}
+ CHECKS_DATABASE_USER: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }}
+ CHECKS_DATABASE_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }}
+
+jobs:
+ Compare:
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ steps:
+ - name: Check commit status
+ run: |
+ if [[ "${{ github.event_name }}" == "workflow_dispatch" && -n "${{ inputs.current_ref }}" ]]; then
+ # For workflow_dispatch with custom ref, skip the check
+ exit 0
+ fi
+
+ # Query GitHub API for commit status
+ STATUSES=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ "https://api.github.com/repos/${{ github.repository }}/commits/${{ github.sha }}/status")
+
+ # Check if there are any statuses
+ if [ "$(echo $STATUSES | jq '.total_count')" -eq 0 ]; then
+ echo "No commit statuses found for ${{ github.sha }}. Assuming tests have not run yet. Aborting workflow."
+ exit 1
+ fi
+
+ echo "Found commit statuses, proceeding with comparison."
+
+ - name: Check out repository code
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+ ref: ${{ inputs.current_ref || github.ref }}
+
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.x'
+ cache: 'pip'
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install clickhouse-driver requests pandas tabulate
+
+ - name: Set default refs
+ id: default_refs
+ run: |
+ VERSION=$(git describe --tags --abbrev=0 | sed 's/v\([0-9]\+\.[0-9]\+\).*/\1/')
+ echo "Detected version: $VERSION"
+
+ PREVIOUS_TAG_COMMIT=$(git log -1 --until=yesterday --tags=v${VERSION}*.altinity* | grep -Po "(?<=commit ).*")
+ echo "PREVIOUS_TAG: $(git tag --contains $PREVIOUS_TAG_COMMIT | grep 'altinity') $PREVIOUS_TAG_COMMIT"
+ UPSTREAM_TAG_COMMIT=$(git log -1 --tags=v${VERSION}*-lts | grep -Po "(?<=commit ).*")
+ echo "UPSTREAM_TAG: $(git tag --contains $UPSTREAM_TAG_COMMIT | grep 'lts') $UPSTREAM_TAG_COMMIT"
+
+ echo "PREVIOUS_TAG_COMMIT=$PREVIOUS_TAG_COMMIT" >> $GITHUB_OUTPUT
+ echo "UPSTREAM_TAG_COMMIT=$UPSTREAM_TAG_COMMIT" >> $GITHUB_OUTPUT
+
+ - name: Comparison report
+ if: ${{ !cancelled() }}
+ run: |
+ git clone https://github.com/Altinity/actions.git
+ cd actions
+ git checkout c5751cefd4f56bd7300b5f6d84a5ae9d0b686772
+ python3 scripts/compare_ci_fails.py \
+ --current-ref ${{ inputs.current_ref || github.sha }} \
+ --previous-ref ${{ inputs.previous_ref || steps.default_refs.outputs.PREVIOUS_TAG_COMMIT }} \
+ --upstream-ref ${{ inputs.upstream_ref || steps.default_refs.outputs.UPSTREAM_TAG_COMMIT }} \
+ ${{ inputs.include_broken && '--broken' || '' }}
+ cat comparison_results.md >> $GITHUB_STEP_SUMMARY
+
+ - name: Upload comparison results
+ uses: actions/upload-artifact@v4
+ with:
+ name: comparison-results
+ path: |
+ actions/comparison_results.md
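
The `Set default refs` step above derives the previous Altinity tag commit and the upstream LTS tag commit from git history whenever the dispatch inputs are empty. A small Python rendering of that resolution logic, assuming the same tag naming conventions (`v<MAJOR.MINOR>*.altinity*` and `v<MAJOR.MINOR>*-lts`); the helper names are illustrative and minor details (e.g. using `--format=%H` instead of grepping the log) differ from the shell version:

```python
# Sketch of the default-ref resolution performed in "Set default refs".
import re
import subprocess


def git(*args: str) -> str:
    return subprocess.run(["git", *args], check=True, capture_output=True, text=True).stdout.strip()


def default_refs() -> dict:
    # MAJOR.MINOR of the most recent tag reachable from HEAD, e.g. "25.3"
    version = re.sub(r"v(\d+\.\d+).*", r"\1", git("describe", "--tags", "--abbrev=0"))
    previous = git("log", "-1", "--until=yesterday", f"--tags=v{version}*.altinity*", "--format=%H")
    upstream = git("log", "-1", f"--tags=v{version}*-lts", "--format=%H")
    return {"PREVIOUS_TAG_COMMIT": previous, "UPSTREAM_TAG_COMMIT": upstream}
```
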
diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml
index b53bf1720963..da8197e3de80 100644
--- a/.github/workflows/create_release.yml
+++ b/.github/workflows/create_release.yml
@@ -70,7 +70,7 @@ jobs:
runs-on: [self-hosted, release-maker]
steps:
- name: Check out repository code
- uses: ClickHouse/checkout@v1
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
with:
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
fetch-depth: 0
diff --git a/.github/workflows/docker_publish.yml b/.github/workflows/docker_publish.yml
new file mode 100644
index 000000000000..65ca24cc1354
--- /dev/null
+++ b/.github/workflows/docker_publish.yml
@@ -0,0 +1,120 @@
+name: Republish Multiarch Docker Image
+
+on:
+ workflow_dispatch:
+ inputs:
+ docker_image:
+ description: 'Multiarch Docker image with tag'
+ required: true
+ release_environment:
+ description: 'Select release type: "staging" or "production"'
+ type: choice
+ default: 'staging'
+ options:
+ - staging
+ - production
+ upload_artifacts:
+ description: 'Upload artifacts directly in this workflow'
+ type: boolean
+ default: true
+ workflow_call:
+ inputs:
+ docker_image:
+ type: string
+ required: true
+ release_environment:
+ type: string
+ required: false
+ default: 'staging'
+ upload_artifacts:
+ type: boolean
+ required: false
+ default: false
+ outputs:
+ image_archives_path:
+ description: 'Path to the image archives directory'
+ value: ${{ jobs.republish.outputs.image_archives_path }}
+
+env:
+ IMAGE: ${{ github.event.inputs.docker_image || inputs.docker_image }}
+
+jobs:
+ republish:
+ runs-on: [self-hosted, altinity-style-checker-aarch64]
+ outputs:
+ image_archives_path: ${{ steps.set_path.outputs.image_archives_path }}
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
+
+ - name: Docker Hub Login
+ uses: docker/login-action@v2
+ with:
+ username: ${{ secrets.DOCKER_USERNAME }}
+ password: ${{ secrets.DOCKER_PASSWORD }}
+
+ - name: Set clickhouse-server version as new tag
+ run: |
+ # Determine "clickhouse-server" or "clickhouse-keeper"
+ COMPONENT=$(echo "$IMAGE" | sed -E 's|.*/(clickhouse-[^:]+):.*|\1|'); echo "COMPONENT=$COMPONENT" >> $GITHUB_ENV
+ echo "Component determined: $COMPONENT"
+
+ # Run the container to get the version
+ CONTAINER_HASH=$(docker run -d --rm $IMAGE 2>&1)
+ NEW_TAG=$(.github/retry.sh 30 10 docker exec $CONTAINER_HASH bash -c "$COMPONENT --version")
+ echo "Base tag from clickhouse version: $NEW_TAG"
+
+ # Append "-prerelease" if necessary
+ if [ "${{ github.event.inputs.release_environment || inputs.release_environment }}" = "staging" ]; then
+ NEW_TAG="${NEW_TAG}-prerelease"
+ fi
+
+ if [[ "$IMAGE" == *-alpine* ]]; then
+ NEW_TAG="${NEW_TAG}-alpine"
+ fi
+ echo "New tag: $NEW_TAG"
+
+ # Export the new tag
+ echo "NEW_TAG=$NEW_TAG" >> $GITHUB_ENV
+
+ - name: Process multiarch manifest
+ run: |
+ echo "Re-tag multiarch image $IMAGE to altinity/$COMPONENT:$NEW_TAG"
+ docker buildx imagetools create --tag "altinity/$COMPONENT:$NEW_TAG" "$IMAGE"
+
+ # Create directory for image archives
+ mkdir -p image_archives
+
+ # Pull and save platform-specific images
+ for PLATFORM in "linux/amd64" "linux/arm64"; do
+ echo "Pulling and saving image for $PLATFORM..."
+ # Pull the specific platform image
+ docker pull --platform $PLATFORM "altinity/$COMPONENT:$NEW_TAG"
+
+ # Save the image to a tar file
+ ARCH=$(echo $PLATFORM | cut -d'/' -f2)
+ docker save "altinity/$COMPONENT:$NEW_TAG" -o "image_archives/${COMPONENT}-${NEW_TAG}-${ARCH}.tar"
+ done
+
+ # Save manifest inspection
+ docker buildx imagetools inspect "altinity/$COMPONENT:$NEW_TAG" > image_archives/manifest.txt
+
+ # Compress the archives
+ cd image_archives
+ for file in *.tar; do
+ gzip "$file"
+ done
+ cd ..
+
+ - name: Set image archives path
+ id: set_path
+ run: |
+ echo "image_archives_path=${{ github.workspace }}/image_archives" >> $GITHUB_OUTPUT
+
+ - name: Upload image archives
+ if: ${{ github.event.inputs.upload_artifacts || inputs.upload_artifacts }}
+ uses: actions/upload-artifact@v3
+ with:
+ name: docker-images-backup
+ path: image_archives/
+ retention-days: 90
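
The republish job's new tag is built from the component in the image name and the version reported by the binary inside the container, with `-prerelease` appended for staging and `-alpine` preserved for alpine images. An illustrative sketch of that derivation (function name and the sample values below are assumptions, not outputs of this workflow):

```python
# Illustrative sketch of the tag derivation in "Set clickhouse-server version as new tag".
import re


def derive_tag(image: str, reported_version: str, release_environment: str) -> tuple[str, str]:
    # "clickhouse-server" or "clickhouse-keeper", taken from the image name
    component = re.sub(r".*/(clickhouse-[^:]+):.*", r"\1", image)
    new_tag = reported_version
    if release_environment == "staging":
        new_tag += "-prerelease"
    if "-alpine" in image:
        new_tag += "-alpine"
    return component, new_tag


# e.g. derive_tag("altinity/clickhouse-server:latest-alpine", "25.3.6.56", "staging")
# -> ("clickhouse-server", "25.3.6.56-prerelease-alpine")
```
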
diff --git a/.github/workflows/docker_test_images.yml b/.github/workflows/docker_test_images.yml
index 4cc9e4c8a820..1b8d94279407 100644
--- a/.github/workflows/docker_test_images.yml
+++ b/.github/workflows/docker_test_images.yml
@@ -1,5 +1,4 @@
name: Build docker images
-
'on':
workflow_call:
inputs:
@@ -12,18 +11,33 @@ name: Build docker images
required: false
type: boolean
default: false
+ secrets:
+ secret_envs:
+ description: if given, it's passed to the environments
+ required: false
+ AWS_SECRET_ACCESS_KEY:
+ description: the access key to the aws param store.
+ required: true
+ AWS_ACCESS_KEY_ID:
+ description: the access key id to the aws param store.
+ required: true
env:
- PYTHONUNBUFFERED: 1
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
+ DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
+
jobs:
DockerBuildAarch64:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
if: |
!failure() && !cancelled() && toJson(fromJson(inputs.data).docker_data.missing_aarch64) != '[]'
steps:
- name: Check out repository code
- uses: ClickHouse/checkout@v1
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
with:
ref: ${{ fromJson(inputs.data).git_ref }}
- name: Build images
@@ -33,12 +47,12 @@ jobs:
--image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \
--missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_aarch64) }}'
DockerBuildAmd64:
- runs-on: [self-hosted, style-checker]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
if: |
!failure() && !cancelled() && toJson(fromJson(inputs.data).docker_data.missing_amd64) != '[]'
steps:
- name: Check out repository code
- uses: ClickHouse/checkout@v1
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
with:
ref: ${{ fromJson(inputs.data).git_ref }}
- name: Build images
@@ -49,12 +63,12 @@ jobs:
--missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_amd64) }}'
DockerMultiArchManifest:
needs: [DockerBuildAmd64, DockerBuildAarch64]
- runs-on: [self-hosted, style-checker]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
if: |
!failure() && !cancelled() && (toJson(fromJson(inputs.data).docker_data.missing_multi) != '[]' || inputs.set_latest)
steps:
- name: Check out repository code
- uses: ClickHouse/checkout@v1
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
with:
ref: ${{ fromJson(inputs.data).git_ref }}
- name: Build images
@@ -65,7 +79,9 @@ jobs:
FLAG_LATEST='--set-latest'
echo "latest tag will be set for resulting manifests"
fi
+ # NOTE(strtgbb): The --no-reports flag avoids a strange error setting the commit status
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64 \
--image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \
--missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_multi) }}' \
+ --no-reports \
$FLAG_LATEST
diff --git a/.github/workflows/grype_scan.yml b/.github/workflows/grype_scan.yml
new file mode 100644
index 000000000000..b6781c386f94
--- /dev/null
+++ b/.github/workflows/grype_scan.yml
@@ -0,0 +1,152 @@
+name: Grype Scan
+run-name: Grype Scan ${{ inputs.docker_image }}
+
+on:
+ workflow_dispatch:
+ # Inputs for manual run
+ inputs:
+ docker_image:
+ description: 'Docker image. If no tag, it will be determined by version_helper.py'
+ required: true
+ workflow_call:
+ # Inputs for workflow call
+ inputs:
+ docker_image:
+ description: 'Docker image. If no tag, it will be determined by version_helper.py'
+ required: true
+ type: string
+ version:
+ description: 'Version tag. If no version, it will be determined by version_helper.py'
+ required: false
+ type: string
+ default: ""
+ tag-suffix:
+ description: 'Tag suffix. To be appended to the version from version_helper.py'
+ required: false
+ type: string
+ default: ""
+env:
+ PYTHONUNBUFFERED: 1
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ GRYPE_VERSION: "v0.92.2-arm64v8"
+
+jobs:
+ grype_scan:
+ name: Grype Scan
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set up Docker
+ uses: docker/setup-buildx-action@v3
+
+ - name: Set up Python
+ run: |
+ export TESTFLOWS_VERSION="2.4.19"
+ sudo apt-get update
+ sudo apt-get install -y python3-pip python3-venv
+ python3 -m venv venv
+ source venv/bin/activate
+ pip install --upgrade requests chardet urllib3 unidiff boto3 PyGithub
+ pip install testflows==$TESTFLOWS_VERSION awscli==1.33.28
+ echo PATH=$PATH >>$GITHUB_ENV
+
+ - name: Set image tag if not given
+ if: ${{ !contains(inputs.docker_image, ':') }}
+ id: set_version
+ env:
+ TAG_SUFFIX: ${{ inputs.tag-suffix }}
+ SPECIFIED_VERSION: ${{ inputs.version }}
+ run: |
+ python3 ./tests/ci/version_helper.py | grep = | tee /tmp/version_info
+ source /tmp/version_info
+ if [ -z "$SPECIFIED_VERSION" ]; then
+ VERSION=$CLICKHOUSE_VERSION_STRING
+ else
+ VERSION=$SPECIFIED_VERSION
+ fi
+ echo "docker_image=${{ inputs.docker_image }}:$PR_NUMBER-$VERSION$TAG_SUFFIX" >> $GITHUB_OUTPUT
+ echo "commit_sha=$CLICKHOUSE_VERSION_GITHASH" >> $GITHUB_OUTPUT
+
+ - name: Run Grype Scan
+ run: |
+ DOCKER_IMAGE=${{ steps.set_version.outputs.docker_image || inputs.docker_image }}
+ ./.github/grype/run_grype_scan.sh $DOCKER_IMAGE
+
+ - name: Parse grype results
+ run: |
+ python3 -u ./.github/grype/parse_vulnerabilities_grype.py -o nice --no-colors --log raw.log --test-to-end
+
+ - name: Transform and Upload Grype Results
+ if: always()
+ id: upload_results
+ env:
+ S3_BUCKET: "altinity-build-artifacts"
+ COMMIT_SHA: ${{ steps.set_version.outputs.commit_sha || github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ PR_NUMBER: ${{ env.PR_NUMBER || github.event.pull_request.number || 0 }}
+ DOCKER_IMAGE: ${{ steps.set_version.outputs.docker_image || inputs.docker_image }}
+ run: |
+ echo "PR_NUMBER=$PR_NUMBER"
+ ./.github/grype/transform_and_upload_results_s3.sh
+
+ - name: Create step summary
+ if: always()
+ id: create_summary
+ run: |
+ jq -r '"**Image**: \(.source.target.userInput)"' result.json >> $GITHUB_STEP_SUMMARY
+ jq -r '.distro | "**Distro**: \(.name):\(.version)"' result.json >> $GITHUB_STEP_SUMMARY
+ if jq -e '.matches | length == 0' result.json > /dev/null; then
+ echo "No CVEs" >> $GITHUB_STEP_SUMMARY
+ else
+ echo "| Severity | Count |" >> $GITHUB_STEP_SUMMARY
+ echo "|------------|-------|" >> $GITHUB_STEP_SUMMARY
+ jq -r '
+ .matches |
+ map(.vulnerability.severity) |
+ group_by(.) |
+ map({severity: .[0], count: length}) |
+ sort_by(.severity) |
+ map("| \(.severity) | \(.count) |") |
+ .[]
+ ' result.json >> $GITHUB_STEP_SUMMARY
+ fi
+
+ HIGH_COUNT=$(jq -r '.matches | map(.vulnerability.severity) | map(select(. == "High")) | length' result.json)
+ CRITICAL_COUNT=$(jq -r '.matches | map(.vulnerability.severity) | map(select(. == "Critical")) | length' result.json)
+ TOTAL_HIGH_CRITICAL=$((HIGH_COUNT + CRITICAL_COUNT))
+ echo "total_high_critical=$TOTAL_HIGH_CRITICAL" >> $GITHUB_OUTPUT
+
+ if [ $TOTAL_HIGH_CRITICAL -gt 0 ]; then
+ echo '## High and Critical vulnerabilities found' >> $GITHUB_STEP_SUMMARY
+ echo '```' >> $GITHUB_STEP_SUMMARY
+ cat raw.log | tfs --no-colors show tests | grep -Pi 'High|Critical' >> $GITHUB_STEP_SUMMARY
+ echo '```' >> $GITHUB_STEP_SUMMARY
+ fi
+
+ - name: Set commit status
+ if: always()
+ uses: actions/github-script@v7
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ script: |
+ github.rest.repos.createCommitStatus({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ sha: '${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}',
+ state: '${{ steps.create_summary.outputs.total_high_critical > 0 && 'failure' || 'success' }}',
+ target_url: '${{ steps.upload_results.outputs.https_s3_path }}/results.html',
+ description: 'Grype Scan Completed with ${{ steps.create_summary.outputs.total_high_critical }} high/critical vulnerabilities',
+ context: 'Grype Scan ${{ steps.set_version.outputs.docker_image || inputs.docker_image }}'
+ })
+
+ - name: Upload artifacts
+ if: always()
+ uses: actions/upload-artifact@v4
+ with:
+ name: grype-results-${{ hashFiles('raw.log') }}
+ path: |
+ result.json
+ nice.log.txt
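
The `Create step summary` step rolls Grype's `result.json` up into a per-severity table and fails the commit status when any High or Critical findings exist. The same roll-up, sketched in Python for clarity (the jq pipeline above remains the authoritative version):

```python
# Sketch of the severity roll-up computed from Grype's result.json.
import json
from collections import Counter


def summarize(result_path: str = "result.json") -> tuple[Counter, int]:
    with open(result_path) as f:
        matches = json.load(f).get("matches", [])
    by_severity = Counter(m["vulnerability"]["severity"] for m in matches)
    high_critical = by_severity.get("High", 0) + by_severity.get("Critical", 0)
    return by_severity, high_critical


if __name__ == "__main__":
    counts, total = summarize()
    for severity, count in sorted(counts.items()):
        print(f"| {severity} | {count} |")
    print("total_high_critical =", total)  # > 0 marks the commit status as failure
```
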
diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml
index 7b7d950eea16..09a6c2dbc747 100644
--- a/.github/workflows/master.yml
+++ b/.github/workflows/master.yml
@@ -1,4158 +1,142 @@
-# generated by praktika
-
+# yamllint disable rule:comments-indentation
name: MasterCI
-on:
- push:
- branches: ['master']
-
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
- CHECKOUT_REF: ""
-
-# Allow updating GH commit statuses and PR comments to post an actual job reports link
-permissions: write-all
+on: # yamllint disable-line rule:truthy
+ push:
+ branches:
+ - 'master'
jobs:
-
- config_workflow:
+ RunConfig:
runs-on: [self-hosted, style-checker-aarch64]
- needs: []
- name: "Config Workflow"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Config Workflow' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Config Workflow' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_tidy:
- runs-on: [self-hosted, builder]
- needs: [config_workflow]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF90aWR5KQ==') }}
- name: "Build (amd_tidy)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_tidy)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_tidy)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_debug:
- runs-on: [self-hosted, builder]
- needs: [config_workflow]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }}
- name: "Build (amd_debug)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_release:
- runs-on: [self-hosted, builder]
- needs: [config_workflow]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }}
- name: "Build (amd_release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_asan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9hc2FuKQ==') }}
- name: "Build (amd_asan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_tsan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF90c2FuKQ==') }}
- name: "Build (amd_tsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_msan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9tc2FuKQ==') }}
- name: "Build (amd_msan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_ubsan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF91YnNhbik=') }}
- name: "Build (amd_ubsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_binary:
- runs-on: [self-hosted, builder]
- needs: [config_workflow]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9iaW5hcnkp') }}
- name: "Build (amd_binary)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_binary)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_binary)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_arm_release:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9yZWxlYXNlKQ==') }}
- name: "Build (arm_release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (arm_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_arm_asan:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9hc2FuKQ==') }}
- name: "Build (arm_asan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (arm_asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_coverage:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, build_amd_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9jb3ZlcmFnZSk=') }}
- name: "Build (amd_coverage)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_coverage)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_coverage)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_arm_binary:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, build_amd_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9iaW5hcnkp') }}
- name: "Build (arm_binary)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_binary)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (arm_binary)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_darwin:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, build_amd_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kYXJ3aW4p') }}
- name: "Build (amd_darwin)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_darwin)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_darwin)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_arm_darwin:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, build_amd_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9kYXJ3aW4p') }}
- name: "Build (arm_darwin)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_darwin)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (arm_darwin)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_arm_v80compat:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, build_amd_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV92ODBjb21wYXQp') }}
- name: "Build (arm_v80compat)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_v80compat)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (arm_v80compat)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_freebsd:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, build_amd_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9mcmVlYnNkKQ==') }}
- name: "Build (amd_freebsd)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_freebsd)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_freebsd)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_ppc64le:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, build_amd_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKHBwYzY0bGUp') }}
- name: "Build (ppc64le)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (ppc64le)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (ppc64le)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_compat:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, build_amd_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9jb21wYXQp') }}
- name: "Build (amd_compat)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_compat)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_compat)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_musl:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, build_amd_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9tdXNsKQ==') }}
- name: "Build (amd_musl)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_musl)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_musl)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_riscv64:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, build_amd_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKHJpc2N2NjQp') }}
- name: "Build (riscv64)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (riscv64)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (riscv64)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_s390x:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, build_amd_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKHMzOTB4KQ==') }}
- name: "Build (s390x)"
outputs:
- data: ${{ steps.run.outputs.DATA }}
+ data: ${{ steps.runconfig.outputs.CI_DATA }}
+ steps:
+ - name: Check out repository code
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
+ with:
+ clear-repository: true # to ensure correct digests
+ fetch-depth: 0 # to get version
+ filter: tree:0
+ - name: Debug Info
+ uses: ./.github/actions/debug
+ - name: Merge sync PR
+ run: |
+ cd "$GITHUB_WORKSPACE/tests/ci"
+ python3 sync_pr.py --merge || :
+# Runs in MQ:
+# - name: Python unit tests
+# run: |
+# cd "$GITHUB_WORKSPACE/tests/ci"
+# echo "Testing the main ci directory"
+# python3 -m unittest discover -s . -p 'test_*.py'
+ - name: PrepareRunConfig
+ id: runconfig
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json
+
+ echo "::group::CI configuration"
+ python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
+ echo "::endgroup::"
+
+ {
+   echo 'CI_DATA<<CIDATA'
+   cat ${{ runner.temp }}/ci_run_data.json
+   echo 'CIDATA'
+ } >> "$GITHUB_OUTPUT"
+ - name: Re-create GH statuses for skipped jobs if any
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses
+# Runs in MQ:
+# BuildDockers:
+# needs: [RunConfig]
+# if: ${{ !failure() && !cancelled() }}
+# uses: ./.github/workflows/docker_test_images.yml
+# with:
+# data: ${{ needs.RunConfig.outputs.data }}
+ # StyleCheck:
+ # needs: [RunConfig, BuildDockers]
+ # if: ${{ !failure() && !cancelled() }}
+ # uses: ./.github/workflows/reusable_test.yml
+ # with:
+ # test_name: Style check
+ # runner_type: style-checker
+ # data: ${{ needs.RunConfig.outputs.data }}
+ # run_command: |
+ # python3 style_check.py --no-push
+
+ ################################# Main stages #################################
+ # for main CI chain
+ #
+ Builds_1:
+ needs: [RunConfig]
+ if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Builds_1') }}
+ # using callable wf (reusable_stage.yml) allows grouping all nested jobs under a tab
+ uses: ./.github/workflows/reusable_build_stage.yml
+ with:
+ stage: Builds_1
+ data: ${{ needs.RunConfig.outputs.data }}
+ Tests_1:
+ needs: [RunConfig, Builds_1]
+ if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_1') }}
+ uses: ./.github/workflows/reusable_test_stage.yml
+ with:
+ stage: Tests_1
+ data: ${{ needs.RunConfig.outputs.data }}
+ Builds_2:
+ needs: [RunConfig, Builds_1]
+ if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Builds_2') }}
+ uses: ./.github/workflows/reusable_build_stage.yml
+ with:
+ stage: Builds_2
+ data: ${{ needs.RunConfig.outputs.data }}
+ Tests_2_ww:
+ needs: [RunConfig, Builds_2]
+ if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2_ww') }}
+ uses: ./.github/workflows/reusable_test_stage.yml
+ with:
+ stage: Tests_2_ww
+ data: ${{ needs.RunConfig.outputs.data }}
+ Tests_2:
+ # Tests_2 should not wait for Tests_1 and should not be blocked by it on the master branch since all jobs need to run there.
+ needs: [RunConfig, Builds_1]
+ if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2') }}
+ uses: ./.github/workflows/reusable_test_stage.yml
+ with:
+ stage: Tests_2
+ data: ${{ needs.RunConfig.outputs.data }}
+
+ ################################# Reports #################################
+ # Reports should run even if Builds_1/2 fail - run them separately, not in Tests_1/2/3
+ Builds_Report:
+ # run report check for failed builds to indicate the CI error
+ if: ${{ !cancelled() && needs.RunConfig.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Builds') }}
+ needs: [RunConfig, Builds_1, Builds_2]
+ uses: ./.github/workflows/reusable_test.yml
+ with:
+ test_name: Builds
+ runner_type: altinity-on-demand, altinity-type-cax41, altinity-in-hel1, altinity-image-arm-app-docker-ce
+ data: ${{ needs.RunConfig.outputs.data }}
+
+ FinishCheck:
+ if: ${{ !cancelled() }}
+ needs: [RunConfig, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2_ww, Tests_2]
+ runs-on: [self-hosted, style-checker-aarch64]
steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
+ - name: Check out repository code
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
+ - name: Finish label
run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (s390x)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (s390x)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_loongarch64:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, build_amd_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGxvb25nYXJjaDY0KQ==') }}
- name: "Build (loongarch64)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
+ cd "$GITHUB_WORKSPACE/tests/ci"
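+          # `needs.*.result` collects the result of every job listed under `needs:`;
+          # the `&& ... || ...` expression below passes 'failure' if any of them failed, else 'success'.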
+ python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+ - name: Check Workflow results
+ if: ${{ !cancelled() }}
run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
+ export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+ cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
${{ toJson(needs) }}
EOF
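+          # The file now holds the JSON status of every dependency job. A hypothetical local
+          # inspection (not part of this workflow) could be, for example:
+          #   jq -r 'to_entries[] | "\(.key): \(.value.result)"' "$WORKFLOW_RESULT_FILE"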
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (loongarch64)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (loongarch64)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_fuzzers:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, build_amd_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGZ1enplcnMp') }}
- name: "Build (fuzzers)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (fuzzers)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (fuzzers)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- unit_tests_binary:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAoYmluYXJ5KQ==') }}
- name: "Unit tests (binary)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Unit tests (binary)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Unit tests (binary)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- unit_tests_asan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAoYXNhbik=') }}
- name: "Unit tests (asan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Unit tests (asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Unit tests (asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- unit_tests_tsan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAodHNhbik=') }}
- name: "Unit tests (tsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Unit tests (tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Unit tests (tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- unit_tests_msan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAobXNhbik=') }}
- name: "Unit tests (msan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Unit tests (msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Unit tests (msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- unit_tests_ubsan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_ubsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAodWJzYW4p') }}
- name: "Unit tests (ubsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Unit tests (ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Unit tests (ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- docker_server_image:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, build_amd_release, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }}
- name: "Docker server image"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Docker server image' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Docker server image' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- docker_keeper_image:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, build_amd_release, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }}
- name: "Docker keeper image"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Docker keeper image' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Docker keeper image' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- install_packages_release:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAocmVsZWFzZSk=') }}
- name: "Install packages (release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Install packages (release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Install packages (release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- install_packages_aarch64:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYWFyY2g2NCk=') }}
- name: "Install packages (aarch64)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Install packages (aarch64)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Install packages (aarch64)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- compatibility_check_release:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAocmVsZWFzZSk=') }}
- name: "Compatibility check (release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Compatibility check (release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Compatibility check (release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- compatibility_check_aarch64:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYWFyY2g2NCk=') }}
- name: "Compatibility check (aarch64)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Compatibility check (aarch64)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Compatibility check (aarch64)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_asan_1_2:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhc2FuLCAxLzIp') }}
- name: "Stateless tests (asan, 1/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (asan, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (asan, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_asan_2_2:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhc2FuLCAyLzIp') }}
- name: "Stateless tests (asan, 2/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (asan, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (asan, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_release:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChyZWxlYXNlKQ==') }}
- name: "Stateless tests (release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_release_old_analyzer_s3_databasereplicated_1_2:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChyZWxlYXNlLCBvbGQgYW5hbHl6ZXIsIHMzLCBEYXRhYmFzZVJlcGxpY2F0ZWQsIDEvMik=') }}
- name: "Stateless tests (release, old analyzer, s3, DatabaseReplicated, 1/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (release, old analyzer, s3, DatabaseReplicated, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (release, old analyzer, s3, DatabaseReplicated, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_release_old_analyzer_s3_databasereplicated_2_2:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChyZWxlYXNlLCBvbGQgYW5hbHl6ZXIsIHMzLCBEYXRhYmFzZVJlcGxpY2F0ZWQsIDIvMik=') }}
- name: "Stateless tests (release, old analyzer, s3, DatabaseReplicated, 2/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (release, old analyzer, s3, DatabaseReplicated, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (release, old analyzer, s3, DatabaseReplicated, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_release_parallelreplicas_s3_storage:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChyZWxlYXNlLCBQYXJhbGxlbFJlcGxpY2FzLCBzMyBzdG9yYWdlKQ==') }}
- name: "Stateless tests (release, ParallelReplicas, s3 storage)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (release, ParallelReplicas, s3 storage)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (release, ParallelReplicas, s3 storage)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_debug:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChkZWJ1Zyk=') }}
- name: "Stateless tests (debug)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_tsan_1_3:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzICh0c2FuLCAxLzMp') }}
- name: "Stateless tests (tsan, 1/3)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (tsan, 1/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (tsan, 1/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_tsan_2_3:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzICh0c2FuLCAyLzMp') }}
- name: "Stateless tests (tsan, 2/3)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (tsan, 2/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (tsan, 2/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_tsan_3_3:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzICh0c2FuLCAzLzMp') }}
- name: "Stateless tests (tsan, 3/3)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (tsan, 3/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (tsan, 3/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_msan_1_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChtc2FuLCAxLzQp') }}
- name: "Stateless tests (msan, 1/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (msan, 1/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (msan, 1/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_msan_2_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChtc2FuLCAyLzQp') }}
- name: "Stateless tests (msan, 2/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (msan, 2/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (msan, 2/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_msan_3_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChtc2FuLCAzLzQp') }}
- name: "Stateless tests (msan, 3/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (msan, 3/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (msan, 3/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_msan_4_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChtc2FuLCA0LzQp') }}
- name: "Stateless tests (msan, 4/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (msan, 4/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (msan, 4/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_ubsan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_ubsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzICh1YnNhbik=') }}
- name: "Stateless tests (ubsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_debug_s3_storage:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChkZWJ1ZywgczMgc3RvcmFnZSk=') }}
- name: "Stateless tests (debug, s3 storage)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (debug, s3 storage)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (debug, s3 storage)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_tsan_s3_storage_1_3:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzICh0c2FuLCBzMyBzdG9yYWdlLCAxLzMp') }}
- name: "Stateless tests (tsan, s3 storage, 1/3)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (tsan, s3 storage, 1/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (tsan, s3 storage, 1/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_tsan_s3_storage_2_3:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzICh0c2FuLCBzMyBzdG9yYWdlLCAyLzMp') }}
- name: "Stateless tests (tsan, s3 storage, 2/3)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (tsan, s3 storage, 2/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (tsan, s3 storage, 2/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_tsan_s3_storage_3_3:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzICh0c2FuLCBzMyBzdG9yYWdlLCAzLzMp') }}
- name: "Stateless tests (tsan, s3 storage, 3/3)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (tsan, s3 storage, 3/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (tsan, s3 storage, 3/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_aarch64:
- runs-on: [self-hosted, func-tester-aarch64]
- needs: [config_workflow, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhYXJjaDY0KQ==') }}
- name: "Stateless tests (aarch64)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (aarch64)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (aarch64)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_azure_asan_1_3:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhenVyZSwgYXNhbiwgMS8zKQ==') }}
- name: "Stateless tests (azure, asan, 1/3)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (azure, asan, 1/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (azure, asan, 1/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_azure_asan_2_3:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhenVyZSwgYXNhbiwgMi8zKQ==') }}
- name: "Stateless tests (azure, asan, 2/3)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (azure, asan, 2/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (azure, asan, 2/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_azure_asan_3_3:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhenVyZSwgYXNhbiwgMy8zKQ==') }}
- name: "Stateless tests (azure, asan, 3/3)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (azure, asan, 3/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (azure, asan, 3/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_1_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgMS82KQ==') }}
- name: "Integration tests (asan, old analyzer, 1/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 1/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 1/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_2_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgMi82KQ==') }}
- name: "Integration tests (asan, old analyzer, 2/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 2/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 2/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_3_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgMy82KQ==') }}
- name: "Integration tests (asan, old analyzer, 3/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 3/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 3/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_4_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgNC82KQ==') }}
- name: "Integration tests (asan, old analyzer, 4/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 4/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 4/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_5_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgNS82KQ==') }}
- name: "Integration tests (asan, old analyzer, 5/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 5/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 5/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_6_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgNi82KQ==') }}
- name: "Integration tests (asan, old analyzer, 6/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 6/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 6/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_release_1_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHJlbGVhc2UsIDEvNCk=') }}
- name: "Integration tests (release, 1/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (release, 1/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (release, 1/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_release_2_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHJlbGVhc2UsIDIvNCk=') }}
- name: "Integration tests (release, 2/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (release, 2/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (release, 2/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_release_3_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHJlbGVhc2UsIDMvNCk=') }}
- name: "Integration tests (release, 3/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (release, 3/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (release, 3/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_release_4_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHJlbGVhc2UsIDQvNCk=') }}
- name: "Integration tests (release, 4/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (release, 4/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (release, 4/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_aarch64_1_4:
- runs-on: [self-hosted, func-tester-aarch64]
- needs: [config_workflow, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFhcmNoNjQsIDEvNCk=') }}
- name: "Integration tests (aarch64, 1/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (aarch64, 1/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (aarch64, 1/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_aarch64_2_4:
- runs-on: [self-hosted, func-tester-aarch64]
- needs: [config_workflow, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFhcmNoNjQsIDIvNCk=') }}
- name: "Integration tests (aarch64, 2/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (aarch64, 2/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (aarch64, 2/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_aarch64_3_4:
- runs-on: [self-hosted, func-tester-aarch64]
- needs: [config_workflow, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFhcmNoNjQsIDMvNCk=') }}
- name: "Integration tests (aarch64, 3/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (aarch64, 3/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (aarch64, 3/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_aarch64_4_4:
- runs-on: [self-hosted, func-tester-aarch64]
- needs: [config_workflow, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFhcmNoNjQsIDQvNCk=') }}
- name: "Integration tests (aarch64, 4/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (aarch64, 4/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (aarch64, 4/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_1_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDEvNik=') }}
- name: "Integration tests (tsan, 1/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 1/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 1/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_2_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDIvNik=') }}
- name: "Integration tests (tsan, 2/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 2/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 2/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_3_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDMvNik=') }}
- name: "Integration tests (tsan, 3/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 3/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 3/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_4_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDQvNik=') }}
- name: "Integration tests (tsan, 4/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 4/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 4/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_5_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDUvNik=') }}
- name: "Integration tests (tsan, 5/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 5/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 5/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_6_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDYvNik=') }}
- name: "Integration tests (tsan, 6/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 6/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 6/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_coverage_1_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_coverage, stateless_tests_asan_1_2, stateless_tests_asan_2_2]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChjb3ZlcmFnZSwgMS82KQ==') }}
- name: "Stateless tests (coverage, 1/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (coverage, 1/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (coverage, 1/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_coverage_2_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_coverage, stateless_tests_asan_1_2, stateless_tests_asan_2_2]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChjb3ZlcmFnZSwgMi82KQ==') }}
- name: "Stateless tests (coverage, 2/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (coverage, 2/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (coverage, 2/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_coverage_3_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_coverage, stateless_tests_asan_1_2, stateless_tests_asan_2_2]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChjb3ZlcmFnZSwgMy82KQ==') }}
- name: "Stateless tests (coverage, 3/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (coverage, 3/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (coverage, 3/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_coverage_4_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_coverage, stateless_tests_asan_1_2, stateless_tests_asan_2_2]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChjb3ZlcmFnZSwgNC82KQ==') }}
- name: "Stateless tests (coverage, 4/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (coverage, 4/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (coverage, 4/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_coverage_5_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_coverage, stateless_tests_asan_1_2, stateless_tests_asan_2_2]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChjb3ZlcmFnZSwgNS82KQ==') }}
- name: "Stateless tests (coverage, 5/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (coverage, 5/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (coverage, 5/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_coverage_6_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_coverage, stateless_tests_asan_1_2, stateless_tests_asan_2_2]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChjb3ZlcmFnZSwgNi82KQ==') }}
- name: "Stateless tests (coverage, 6/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (coverage, 6/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (coverage, 6/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_debug:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGRlYnVnKQ==') }}
- name: "Stress test (debug)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_tsan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKHRzYW4p') }}
- name: "Stress test (tsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_asan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFzYW4p') }}
- name: "Stress test (asan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_ubsan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_ubsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKHVic2FuKQ==') }}
- name: "Stress test (ubsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_msan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKG1zYW4p') }}
- name: "Stress test (msan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_azure_tsan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGF6dXJlLCB0c2FuKQ==') }}
- name: "Stress test (azure, tsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (azure, tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (azure, tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_azure_msan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGF6dXJlLCBtc2FuKQ==') }}
- name: "Stress test (azure, msan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (azure, msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (azure, msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- clickbench_release:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q2xpY2tCZW5jaCAocmVsZWFzZSk=') }}
- name: "ClickBench (release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'ClickBench (release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'ClickBench (release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- clickbench_aarch64:
- runs-on: [self-hosted, func-tester-aarch64]
- needs: [config_workflow, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q2xpY2tCZW5jaCAoYWFyY2g2NCk=') }}
- name: "ClickBench (aarch64)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'ClickBench (aarch64)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'ClickBench (aarch64)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- ast_fuzzer_debug:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoZGVidWcp') }}
- name: "AST fuzzer (debug)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'AST fuzzer (debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'AST fuzzer (debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- ast_fuzzer_asan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYXNhbik=') }}
- name: "AST fuzzer (asan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'AST fuzzer (asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'AST fuzzer (asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- ast_fuzzer_tsan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAodHNhbik=') }}
- name: "AST fuzzer (tsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'AST fuzzer (tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'AST fuzzer (tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- ast_fuzzer_msan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAobXNhbik=') }}
- name: "AST fuzzer (msan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'AST fuzzer (msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'AST fuzzer (msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- ast_fuzzer_ubsan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_ubsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAodWJzYW4p') }}
- name: "AST fuzzer (ubsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'AST fuzzer (ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'AST fuzzer (ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- buzzhouse_debug:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChkZWJ1Zyk=') }}
- name: "BuzzHouse (debug)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'BuzzHouse (debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'BuzzHouse (debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- buzzhouse_asan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhc2FuKQ==') }}
- name: "BuzzHouse (asan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'BuzzHouse (asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'BuzzHouse (asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- buzzhouse_tsan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlICh0c2FuKQ==') }}
- name: "BuzzHouse (tsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'BuzzHouse (tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'BuzzHouse (tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- buzzhouse_msan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChtc2FuKQ==') }}
- name: "BuzzHouse (msan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'BuzzHouse (msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'BuzzHouse (msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- buzzhouse_ubsan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_ubsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlICh1YnNhbik=') }}
- name: "BuzzHouse (ubsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'BuzzHouse (ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'BuzzHouse (ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- performance_comparison_release_1_3:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAocmVsZWFzZSwgMS8zKQ==') }}
- name: "Performance Comparison (release, 1/3)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Performance Comparison (release, 1/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Performance Comparison (release, 1/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- performance_comparison_release_2_3:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAocmVsZWFzZSwgMi8zKQ==') }}
- name: "Performance Comparison (release, 2/3)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Performance Comparison (release, 2/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Performance Comparison (release, 2/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- performance_comparison_release_3_3:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAocmVsZWFzZSwgMy8zKQ==') }}
- name: "Performance Comparison (release, 3/3)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Performance Comparison (release, 3/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Performance Comparison (release, 3/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- sqlancer_release:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U1FMYW5jZXIgKHJlbGVhc2Up') }}
- name: "SQLancer (release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'SQLancer (release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'SQLancer (release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- sqlancer_debug:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U1FMYW5jZXIgKGRlYnVnKQ==') }}
- name: "SQLancer (debug)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'SQLancer (debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'SQLancer (debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- sqltest:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U1FMVGVzdA==') }}
- name: "SQLTest"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'SQLTest' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'SQLTest' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
+ python3 ./tests/ci/ci_buddy.py --check-wf-status
diff --git a/.github/workflows/merge_queue.yml b/.github/workflows/merge_queue.yml
index 31898d1471b0..34061130aeb3 100644
--- a/.github/workflows/merge_queue.yml
+++ b/.github/workflows/merge_queue.yml
@@ -1,279 +1,114 @@
-# generated by praktika
-
+# yamllint disable rule:comments-indentation
name: MergeQueueCI
-on:
- merge_group:
-
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
- CHECKOUT_REF: ""
+on: # yamllint disable-line rule:truthy
+ merge_group:
jobs:
-
- config_workflow:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: []
- name: "Config Workflow"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_mergequeueci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Config Workflow' --workflow "MergeQueueCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Config Workflow' --workflow "MergeQueueCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- dockers_build_arm:
+ RunConfig:
runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }}
- name: "Dockers Build (arm)"
outputs:
- data: ${{ steps.run.outputs.DATA }}
+ data: ${{ steps.runconfig.outputs.CI_DATA }}
steps:
- - name: Checkout code
- uses: actions/checkout@v4
+ - name: Check out repository code
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f
with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
+ clear-repository: true # to ensure correct digests
+ fetch-depth: 0 # to get a version
+ filter: tree:0
+ - name: Debug Info
+ uses: ./.github/actions/debug
+ - name: Cancel PR workflow
run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_mergequeueci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Dockers Build (arm)' --workflow "MergeQueueCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Dockers Build (arm)' --workflow "MergeQueueCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- dockers_build_amd_and_merge:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_arm]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKSBhbmQgTWVyZ2U=') }}
- name: "Dockers Build (amd) and Merge"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --cancel-previous-run
+ - name: Python unit tests
run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_mergequeueci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
+ cd "$GITHUB_WORKSPACE/tests/ci"
+ echo "Testing the main ci directory"
+ python3 -m unittest discover -s . -p 'test_*.py'
+ - name: PrepareRunConfig
+ id: runconfig
run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Dockers Build (amd) and Merge' --workflow "MergeQueueCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Dockers Build (amd) and Merge' --workflow "MergeQueueCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- style_check:
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json
+
+ echo "::group::CI configuration"
+ python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
+ echo "::endgroup::"
+
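+          # expose the generated CI config to dependent jobs as the multi-line CI_DATA output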
+          {
+            echo 'CI_DATA<<CIDB'
+            cat "${{ runner.temp }}/ci_run_data.json"
+            echo 'CIDB'
+          } >> "$GITHUB_OUTPUT"
+ BuildDockers:
+ needs: [RunConfig]
+ if: ${{ !failure() && !cancelled() && toJson(fromJson(needs.RunConfig.outputs.data).docker_data.missing_multi) != '[]' }}
+ uses: ./.github/workflows/docker_test_images.yml
+ with:
+ data: ${{ needs.RunConfig.outputs.data }}
+ StyleCheck:
+ needs: [RunConfig, BuildDockers]
+    if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Style check') }}
+ uses: ./.github/workflows/reusable_test.yml
+ with:
+ test_name: Style check
+ runner_type: altinity-on-demand, altinity-type-cax41, altinity-in-hel1, altinity-image-arm-app-docker-ce
+ run_command: |
+ python3 style_check.py --no-push
+ data: ${{ needs.RunConfig.outputs.data }}
+ FastTest:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Fast test') }}
+ uses: ./.github/workflows/reusable_test.yml
+ with:
+ test_name: Fast test
+ runner_type: builder
+ data: ${{ needs.RunConfig.outputs.data }}
+ run_command: |
+ python3 fast_test_check.py
+
+ Builds_1:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Builds_1') }}
+    # using a callable workflow (reusable_build_stage.yml) allows grouping all nested jobs under a tab
+ uses: ./.github/workflows/reusable_build_stage.yml
+ with:
+ stage: Builds_1
+ data: ${{ needs.RunConfig.outputs.data }}
+ Tests_1:
+ needs: [RunConfig, Builds_1]
+ if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_1') }}
+ uses: ./.github/workflows/reusable_test_stage.yml
+ with:
+ stage: Tests_1
+ data: ${{ needs.RunConfig.outputs.data }}
+
+ CheckReadyForMerge:
+ if: ${{ !cancelled() }}
+ # Test_2 or Test_3 must not have jobs required for Mergeable check
+ needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Tests_1]
runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3R5bGUgY2hlY2s=') }}
- name: "Style check"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_mergequeueci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Style check' --workflow "MergeQueueCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Style check' --workflow "MergeQueueCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- fast_test:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RmFzdCB0ZXN0') }}
- name: "Fast test"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_mergequeueci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Fast test' --workflow "MergeQueueCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Fast test' --workflow "MergeQueueCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_binary:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9iaW5hcnkp') }}
- name: "Build (amd_binary)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
+ - name: Check out repository code
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f
+ - name: Check and set merge status
+ if: ${{ needs.StyleCheck.result == 'success' }}
run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_mergequeueci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
+ export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+ cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
${{ toJson(needs) }}
EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_binary)' --workflow "MergeQueueCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_binary)' --workflow "MergeQueueCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- finish_workflow:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow, dockers_build_arm, dockers_build_amd_and_merge, style_check, fast_test, build_amd_binary]
- if: ${{ !cancelled() }}
- name: "Finish Workflow"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
+ cd "$GITHUB_WORKSPACE/tests/ci"
+ python3 merge_pr.py --set-ci-status
+ - name: Check Workflow results
+ if: ${{ !cancelled() }}
run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_mergequeueci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
+ export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+ cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
${{ toJson(needs) }}
EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Finish Workflow' --workflow "MergeQueueCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Finish Workflow' --workflow "MergeQueueCI" --ci |& tee ./ci/tmp/job.log
- fi
+ python3 ./tests/ci/ci_buddy.py --check-wf-status
diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
index ace191fc09a3..87cc85e0e9e3 100644
--- a/.github/workflows/nightly.yml
+++ b/.github/workflows/nightly.yml
@@ -16,7 +16,7 @@ jobs:
data: ${{ steps.runconfig.outputs.CI_DATA }}
steps:
- name: Check out repository code
- uses: ClickHouse/checkout@v1
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f
with:
clear-repository: true # to ensure correct digests
fetch-depth: 0 # to get version
@@ -80,7 +80,7 @@ jobs:
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Check out repository code
- uses: ClickHouse/checkout@v1
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f
- name: Check Workflow results
if: ${{ !cancelled() }}
run: |
diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml
index 5f365a5c3e1f..6e56ae9edd70 100644
--- a/.github/workflows/pull_request.yml
+++ b/.github/workflows/pull_request.yml
@@ -19,7 +19,7 @@ permissions: write-all
jobs:
config_workflow:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-type-cax41, altinity-in-hel1, altinity-image-arm-app-docker-ce]
needs: []
name: "Config Workflow"
outputs:
diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml
new file mode 100644
index 000000000000..965418f820d3
--- /dev/null
+++ b/.github/workflows/regression.yml
@@ -0,0 +1,731 @@
+name: Regression test workflow - Release
+'on':
+ workflow_call:
+ inputs:
+ runner_type:
+        description: the label of the runner to use; can be a simple string or a comma-separated list
+ required: true
+ type: string
+ commit:
+ description: commit hash of the regression tests.
+ required: true
+ type: string
+ arch:
+ description: arch to run the tests on.
+ required: true
+ type: string
+ timeout_minutes:
+ description: Maximum number of minutes to let workflow run before GitHub cancels it.
+ default: 210
+ type: number
+ build_sha:
+ description: commit sha of the workflow run for artifact upload.
+ required: true
+ type: string
+ checkout_depth:
+ description: the value of the git shallow checkout
+ required: false
+ type: number
+ default: 1
+ submodules:
+ description: if the submodules should be checked out
+ required: false
+ type: boolean
+ default: false
+ additional_envs:
+        description: additional ENV variables to set up the job
+ type: string
+ secrets:
+ secret_envs:
+ description: if given, it's passed to the environments
+ required: false
+ AWS_SECRET_ACCESS_KEY:
+ description: the access key to the aws param store.
+ required: true
+ AWS_ACCESS_KEY_ID:
+ description: the access key id to the aws param store.
+ required: true
+ AWS_DEFAULT_REGION:
+ description: the region of the aws param store.
+ required: true
+ AWS_REPORT_KEY_ID:
+ description: aws s3 key id used for regression test reports.
+ required: true
+ AWS_REPORT_SECRET_ACCESS_KEY:
+ description: aws s3 secret access key used for regression test reports.
+ required: true
+ AWS_REPORT_REGION:
+ description: aws s3 region used for regression test reports.
+ required: true
+ DOCKER_USERNAME:
+ description: username of the docker user.
+ required: true
+ DOCKER_PASSWORD:
+ description: password to the docker user.
+ required: true
+ REGRESSION_AWS_S3_BUCKET:
+ description: aws s3 bucket used for regression tests.
+ required: true
+ REGRESSION_AWS_S3_KEY_ID:
+ description: aws s3 key id used for regression tests.
+ required: true
+ REGRESSION_AWS_S3_SECRET_ACCESS_KEY:
+ description: aws s3 secret access key used for regression tests.
+ required: true
+ REGRESSION_AWS_S3_REGION:
+ description: aws s3 region used for regression tests.
+ required: true
+ REGRESSION_GCS_KEY_ID:
+ description: gcs key id used for regression tests.
+ required: true
+ REGRESSION_GCS_KEY_SECRET:
+ description: gcs key secret used for regression tests.
+ required: true
+ REGRESSION_GCS_URI:
+ description: gcs uri used for regression tests.
+ required: true
+
+env:
+ # Force the stdout and stderr streams to be unbuffered
+ PYTHONUNBUFFERED: 1
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+ DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
+ DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
+ CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }}
+ CHECKS_DATABASE_USER: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }}
+ CHECKS_DATABASE_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }}
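+  # common arguments appended to every regression suite invocation below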
+ args: --test-to-end
+ --no-colors
+ --local
+ --collect-service-logs
+ --output classic
+ --parallel 1
+ --log raw.log
+ --with-analyzer
+ artifacts: builds
+ artifact_paths: |
+ ./report.html
+ ./*.log.txt
+ ./*.log
+ ./*.html
+ ./*/_instances/*.log
+ ./*/_instances/*/logs/*.log
+ ./*/*/_instances/*/logs/*.log
+ ./*/*/_instances/*.log
+ build_sha: ${{ inputs.build_sha }}
+ pr_number: ${{ github.event.number }}
+ event_name: ${{ github.event_name }}
+
+jobs:
+ runner_labels_setup:
+ name: Compute proper runner labels for the rest of the jobs
+ runs-on: ubuntu-latest
+ outputs:
+ runner_labels: ${{ steps.setVariables.outputs.runner_labels }}
+ steps:
+ - id: setVariables
+ name: Prepare runner_labels variables for the later steps
+ run: |
+
+ # Prepend self-hosted
+ input="self-hosted, ${input}"
+
+ # Remove all whitespace
+ input="$(echo ${input} | tr -d [:space:])"
+ # Make something like a JSON array from comma-separated list
+ input="[ '${input//\,/\'\, \'}' ]"
+
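+          # e.g. (illustrative) "altinity-type-cax41, altinity-in-hel1" becomes
+          # [ 'self-hosted', 'altinity-type-cax41', 'altinity-in-hel1' ]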
+ echo "runner_labels=$input" >> ${GITHUB_OUTPUT}
+ env:
+ input: ${{ inputs.runner_type }}
+
+ Common:
+ strategy:
+ fail-fast: false
+ matrix:
+ SUITE: [aes_encryption, aggregate_functions, atomic_insert, base_58, clickhouse_keeper, data_types, datetime64_extended_range, disk_level_encryption, dns, engines, example, extended_precision_data_types, iceberg, kafka, kerberos, key_value, lightweight_delete, memory, part_moves_between_shards, rbac, selects, session_timezone, ssl_server, swarms, tiered_storage, version, window_functions]
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=${{ matrix.SUITE }}
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v4
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ name: build_report_package_${{ inputs.arch }}
+ - name: Rename reports
+ run: |
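+          # rename the downloaded build report to a predictable, arch-specific file name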
+ mv ${{ env.REPORTS_PATH }}/build_report_*.json ${{ env.REPORTS_PATH }}/build_report_package_${{ inputs.arch }}.json
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ run: python3 .github/get-deb-url.py --reports-path ${{ env.REPORTS_PATH }} --github-env $GITHUB_ENV
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
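+        # capture the suite's exit code, add the log link, then propagate the code as the step result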
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/regression.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.SUITE }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} ${{ matrix.SUITE }}"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: ${{ env.SUITE }}-${{ inputs.arch }}-artifacts
+          path: ${{ env.artifact_paths }}
+
+ Alter:
+ strategy:
+ fail-fast: false
+ matrix:
+ ONLY: [replace, attach, move]
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=alter
+ STORAGE=/${{ matrix.ONLY }}_partition
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v4
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ name: build_report_package_${{ inputs.arch }}
+ - name: Rename reports
+ run: |
+ mv ${{ env.REPORTS_PATH }}/build_report_*.json ${{ env.REPORTS_PATH }}/build_report_package_${{ inputs.arch }}.json
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ run: python3 .github/get-deb-url.py --reports-path ${{ env.REPORTS_PATH }} --github-env $GITHUB_ENV
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
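+        # each matrix entry runs only its "<ONLY> partition" subset of the alter suite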
+ run: EXITCODE=0;
+ python3
+ -u alter/regression.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --only "/alter/${{ matrix.ONLY }} partition/*"
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.ONLY }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} Alter ${{ matrix.ONLY }} partition"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: alter-${{ matrix.ONLY }}-${{ inputs.arch }}-artifacts
+          path: ${{ env.artifact_paths }}
+
+ Benchmark:
+ strategy:
+ fail-fast: false
+ matrix:
+ STORAGE: [minio, aws_s3, gcs]
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=ontime_benchmark
+ STORAGE=/${{ matrix.STORAGE }}
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v4
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ name: build_report_package_${{ inputs.arch }}
+ - name: Rename reports
+ run: |
+ mv ${{ env.REPORTS_PATH }}/build_report_*.json ${{ env.REPORTS_PATH }}/build_report_package_${{ inputs.arch }}.json
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ run: python3 .github/get-deb-url.py --reports-path ${{ env.REPORTS_PATH }} --github-env $GITHUB_ENV
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/benchmark.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --storage ${{ matrix.STORAGE }}
+ --gcs-uri ${{ secrets.REGRESSION_GCS_URI }}
+ --gcs-key-id ${{ secrets.REGRESSION_GCS_KEY_ID }}
+ --gcs-key-secret ${{ secrets.REGRESSION_GCS_KEY_SECRET }}
+ --aws-s3-bucket ${{ secrets.REGRESSION_AWS_S3_BUCKET }}
+ --aws-s3-region ${{ secrets.REGRESSION_AWS_S3_REGION }}
+ --aws-s3-key-id ${{ secrets.REGRESSION_AWS_S3_KEY_ID }}
+ --aws-s3-access-key ${{ secrets.REGRESSION_AWS_S3_SECRET_ACCESS_KEY }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.STORAGE }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} Benchmark ${{ matrix.STORAGE }}"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: benchmark-${{ matrix.STORAGE }}-${{ inputs.arch }}-artifacts
+ path: ${{ env.artifact_paths }}
+
+ ClickHouseKeeperSSL:
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+          REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=clickhouse_keeper
+ STORAGE=/ssl
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v4
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ name: build_report_package_${{ inputs.arch }}
+ - name: Rename reports
+ run: |
+ mv ${{ env.REPORTS_PATH }}/build_report_*.json ${{ env.REPORTS_PATH }}/build_report_package_${{ inputs.arch }}.json
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ run: python3 .github/get-deb-url.py --reports-path ${{ env.REPORTS_PATH }} --github-env $GITHUB_ENV
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/regression.py
+ --ssl
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name=$GITHUB_JOB job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} Clickhouse Keeper SSL"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: ${{ env.SUITE }}-${{ inputs.arch }}-ssl-artifacts
+ path: ${{ env.artifact_paths }}
+
+ LDAP:
+ strategy:
+ fail-fast: false
+ matrix:
+ SUITE: [authentication, external_user_directory, role_mapping]
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=ldap/${{ matrix.SUITE }}
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v4
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ name: build_report_package_${{ inputs.arch }}
+ - name: Rename reports
+ run: |
+ mv ${{ env.REPORTS_PATH }}/build_report_*.json ${{ env.REPORTS_PATH }}/build_report_package_${{ inputs.arch }}.json
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ run: python3 .github/get-deb-url.py --reports-path ${{ env.REPORTS_PATH }} --github-env $GITHUB_ENV
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/regression.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.SUITE }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} LDAP ${{ matrix.SUITE }}"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: ldap-${{ matrix.SUITE }}-${{ inputs.arch }}-artifacts
+ path: ${{ env.artifact_paths }}
+
+ Parquet:
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=parquet
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v4
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ name: build_report_package_${{ inputs.arch }}
+ - name: Rename reports
+ run: |
+ mv ${{ env.REPORTS_PATH }}/build_report_*.json ${{ env.REPORTS_PATH }}/build_report_package_${{ inputs.arch }}.json
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ run: python3 .github/get-deb-url.py --reports-path ${{ env.REPORTS_PATH }} --github-env $GITHUB_ENV
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/regression.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name=$GITHUB_JOB job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} Parquet"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: ${{ env.SUITE }}-${{ inputs.arch }}-artifacts
+ path: ${{ env.artifact_paths }}
+
+ ParquetS3:
+ strategy:
+ fail-fast: false
+ matrix:
+ STORAGE: [minio, aws_s3]
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=parquet
+          STORAGE=${{ matrix.STORAGE }}
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v4
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ name: build_report_package_${{ inputs.arch }}
+ - name: Rename reports
+ run: |
+ mv ${{ env.REPORTS_PATH }}/build_report_*.json ${{ env.REPORTS_PATH }}/build_report_package_${{ inputs.arch }}.json
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ run: python3 .github/get-deb-url.py --reports-path ${{ env.REPORTS_PATH }} --github-env $GITHUB_ENV
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/regression.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --storage ${{ matrix.STORAGE }}
+ --aws-s3-bucket ${{ secrets.REGRESSION_AWS_S3_BUCKET }}
+ --aws-s3-region ${{ secrets.REGRESSION_AWS_S3_REGION }}
+ --aws-s3-key-id ${{ secrets.REGRESSION_AWS_S3_KEY_ID }}
+ --aws-s3-access-key ${{ secrets.REGRESSION_AWS_S3_SECRET_ACCESS_KEY }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.STORAGE }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} Parquet ${{ matrix.STORAGE }}"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: ${{ env.SUITE }}-${{ env.STORAGE }}-${{ inputs.arch }}-artifacts
+ path: ${{ env.artifact_paths }}
+
+ S3:
+ strategy:
+ fail-fast: false
+ matrix:
+ STORAGE: [minio, aws_s3, gcs, azure]
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=s3
+ STORAGE=/${{ matrix.STORAGE }}
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v4
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ name: build_report_package_${{ inputs.arch }}
+ - name: Rename reports
+ run: |
+ mv ${{ env.REPORTS_PATH }}/build_report_*.json ${{ env.REPORTS_PATH }}/build_report_package_${{ inputs.arch }}.json
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ run: python3 .github/get-deb-url.py --reports-path ${{ env.REPORTS_PATH }} --github-env $GITHUB_ENV
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/regression.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --storage ${{ matrix.STORAGE }}
+ --gcs-uri ${{ secrets.REGRESSION_GCS_URI }}
+ --gcs-key-id ${{ secrets.REGRESSION_GCS_KEY_ID }}
+ --gcs-key-secret ${{ secrets.REGRESSION_GCS_KEY_SECRET }}
+ --aws-s3-bucket ${{ secrets.REGRESSION_AWS_S3_BUCKET }}
+ --aws-s3-region ${{ secrets.REGRESSION_AWS_S3_REGION }}
+ --aws-s3-key-id ${{ secrets.REGRESSION_AWS_S3_KEY_ID }}
+ --aws-s3-access-key ${{ secrets.REGRESSION_AWS_S3_SECRET_ACCESS_KEY }}
+ --azure-account-name ${{ secrets.AZURE_ACCOUNT_NAME }}
+ --azure-storage-key ${{ secrets.AZURE_STORAGE_KEY }}
+ --azure-container ${{ secrets.AZURE_CONTAINER_NAME }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.STORAGE }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} S3 ${{ matrix.STORAGE }}"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: ${{ env.SUITE }}-${{ matrix.STORAGE }}-${{ inputs.arch }}-artifacts
+          path: ${{ env.artifact_paths }}
+
+ TieredStorage:
+ strategy:
+ fail-fast: false
+ matrix:
+ STORAGE: [minio, s3amazon, s3gcs]
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=tiered_storage
+ STORAGE=/${{ matrix.STORAGE }}
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v4
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ name: build_report_package_${{ inputs.arch }}
+ - name: Rename reports
+ run: |
+ mv ${{ env.REPORTS_PATH }}/build_report_*.json ${{ env.REPORTS_PATH }}/build_report_package_${{ inputs.arch }}.json
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ run: python3 .github/get-deb-url.py --reports-path ${{ env.REPORTS_PATH }} --github-env $GITHUB_ENV
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
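+        # the storage backend under test is selected via the --with-<storage> flag below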
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/regression.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --aws-s3-access-key ${{ secrets.REGRESSION_AWS_S3_SECRET_ACCESS_KEY }}
+ --aws-s3-key-id ${{ secrets.REGRESSION_AWS_S3_KEY_ID }}
+          --aws-s3-uri https://s3.${{ secrets.REGRESSION_AWS_S3_REGION }}.amazonaws.com/${{ secrets.REGRESSION_AWS_S3_BUCKET }}/data/
+ --gcs-key-id ${{ secrets.REGRESSION_GCS_KEY_ID }}
+ --gcs-key-secret ${{ secrets.REGRESSION_GCS_KEY_SECRET }}
+ --gcs-uri ${{ secrets.REGRESSION_GCS_URI }}
+ --with-${{ matrix.STORAGE }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.STORAGE }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} Tiered Storage ${{ matrix.STORAGE }}"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: ${{ env.SUITE }}-${{ matrix.STORAGE }}-${{ inputs.arch }}-artifacts
+          path: ${{ env.artifact_paths }}
diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml
index c404ea3ae4cc..68323d252031 100644
--- a/.github/workflows/release_branches.yml
+++ b/.github/workflows/release_branches.yml
@@ -1,1650 +1,537 @@
-# generated by praktika
-
+# yamllint disable rule:comments-indentation
name: ReleaseBranchCI
-on:
- push:
- branches: ['2[1-9].[1-9][0-9]', '2[1-9].[1-9]']
-
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
- CHECKOUT_REF: ""
-
-# Allow updating GH commit statuses and PR comments to post an actual job reports link
-permissions: write-all
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }}
+ CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }}
+ CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }}
+ DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
+ ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }}
+
+on: # yamllint disable-line rule:truthy
+ pull_request:
+ types:
+ - synchronize
+ - reopened
+ - opened
+ branches:
+ - 'releases/*'
+ push:
+ branches:
+ - 'releases/*'
+ tags:
+ - '*'
+ workflow_dispatch:
+ inputs:
+ workflow_name:
+ description: 'Name of the workflow'
+ required: false
+ type: string
jobs:
-
- config_workflow:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: []
- name: "Config Workflow"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Config Workflow' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Config Workflow' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- dockers_build_arm:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }}
- name: "Dockers Build (arm)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Dockers Build (arm)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Dockers Build (arm)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- dockers_build_amd_and_merge:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_arm]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKSBhbmQgTWVyZ2U=') }}
- name: "Dockers Build (amd) and Merge"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Dockers Build (amd) and Merge' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Dockers Build (amd) and Merge' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_debug:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }}
- name: "Build (amd_debug)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_debug)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_debug)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_release:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }}
- name: "Build (amd_release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_release)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_release)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_asan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9hc2FuKQ==') }}
- name: "Build (amd_asan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_asan)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_asan)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_tsan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF90c2FuKQ==') }}
- name: "Build (amd_tsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_tsan)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_tsan)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_msan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9tc2FuKQ==') }}
- name: "Build (amd_msan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_msan)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_msan)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_ubsan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF91YnNhbik=') }}
- name: "Build (amd_ubsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_ubsan)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_ubsan)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_binary:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9iaW5hcnkp') }}
- name: "Build (amd_binary)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_binary)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_binary)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_arm_release:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9yZWxlYXNlKQ==') }}
- name: "Build (arm_release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_release)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (arm_release)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_arm_asan:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9hc2FuKQ==') }}
- name: "Build (arm_asan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_asan)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (arm_asan)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_darwin:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kYXJ3aW4p') }}
- name: "Build (amd_darwin)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_darwin)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_darwin)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_arm_darwin:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9kYXJ3aW4p') }}
- name: "Build (arm_darwin)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_darwin)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (arm_darwin)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- docker_server_image:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_release, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }}
- name: "Docker server image"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Docker server image' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Docker server image' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- docker_keeper_image:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_release, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }}
- name: "Docker keeper image"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Docker keeper image' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Docker keeper image' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- install_packages_release:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAocmVsZWFzZSk=') }}
- name: "Install packages (release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Install packages (release)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Install packages (release)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- install_packages_aarch64:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow, dockers_build_amd_and_merge, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYWFyY2g2NCk=') }}
- name: "Install packages (aarch64)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Install packages (aarch64)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Install packages (aarch64)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_1_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIDEvNCk=') }}
- name: "Integration tests (asan, 1/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, 1/4)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, 1/4)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_2_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIDIvNCk=') }}
- name: "Integration tests (asan, 2/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, 2/4)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, 2/4)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_3_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIDMvNCk=') }}
- name: "Integration tests (asan, 3/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, 3/4)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, 3/4)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_4_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIDQvNCk=') }}
- name: "Integration tests (asan, 4/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, 4/4)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, 4/4)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_1_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgMS82KQ==') }}
- name: "Integration tests (asan, old analyzer, 1/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 1/6)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 1/6)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_2_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgMi82KQ==') }}
- name: "Integration tests (asan, old analyzer, 2/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 2/6)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 2/6)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_3_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgMy82KQ==') }}
- name: "Integration tests (asan, old analyzer, 3/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 3/6)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 3/6)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_4_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgNC82KQ==') }}
- name: "Integration tests (asan, old analyzer, 4/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 4/6)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 4/6)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_5_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgNS82KQ==') }}
- name: "Integration tests (asan, old analyzer, 5/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 5/6)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 5/6)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_6_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgNi82KQ==') }}
- name: "Integration tests (asan, old analyzer, 6/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 6/6)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 6/6)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_release_1_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHJlbGVhc2UsIDEvNCk=') }}
- name: "Integration tests (release, 1/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (release, 1/4)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (release, 1/4)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_release_2_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHJlbGVhc2UsIDIvNCk=') }}
- name: "Integration tests (release, 2/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (release, 2/4)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (release, 2/4)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_release_3_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHJlbGVhc2UsIDMvNCk=') }}
- name: "Integration tests (release, 3/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (release, 3/4)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (release, 3/4)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_release_4_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHJlbGVhc2UsIDQvNCk=') }}
- name: "Integration tests (release, 4/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (release, 4/4)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (release, 4/4)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_1_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDEvNik=') }}
- name: "Integration tests (tsan, 1/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 1/6)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 1/6)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_2_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDIvNik=') }}
- name: "Integration tests (tsan, 2/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 2/6)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 2/6)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_3_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDMvNik=') }}
- name: "Integration tests (tsan, 3/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 3/6)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 3/6)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_4_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDQvNik=') }}
- name: "Integration tests (tsan, 4/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 4/6)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 4/6)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_5_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDUvNik=') }}
- name: "Integration tests (tsan, 5/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 5/6)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 5/6)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_6_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDYvNik=') }}
- name: "Integration tests (tsan, 6/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 6/6)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 6/6)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_debug:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGRlYnVnKQ==') }}
- name: "Stress test (debug)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (debug)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (debug)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_tsan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKHRzYW4p') }}
- name: "Stress test (tsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (tsan)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (tsan)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_asan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFzYW4p') }}
- name: "Stress test (asan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (asan)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (asan)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_ubsan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_ubsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKHVic2FuKQ==') }}
- name: "Stress test (ubsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (ubsan)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (ubsan)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_msan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKG1zYW4p') }}
- name: "Stress test (msan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (msan)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (msan)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
+ RunConfig:
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ outputs:
+ data: ${{ steps.runconfig.outputs.CI_DATA }}
+ steps:
+ - name: Check out repository code
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
+ with:
+ clear-repository: true # to ensure correct digests
+ fetch-depth: 0 # to get version
+ filter: tree:0
+ - name: Debug Info
+ uses: ./.github/actions/debug
+ - name: PrepareRunConfig
+ id: runconfig
+ run: |
+ echo "::group::configure CI run"
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json
+ echo "::endgroup::"
+ echo "::group::CI run configure results"
+ python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
+ echo "::endgroup::"
+ {
+            echo 'CI_DATA<<CIDB'
+            cat ${{ runner.temp }}/ci_run_data.json
+            echo 'CIDB'
+          } >> "$GITHUB_OUTPUT"
+ - name: Re-create GH statuses for skipped jobs if any
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses
+ - name: Note report location to summary
+ env:
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ run: |
+ REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PR_NUMBER/$COMMIT_SHA/ci_run_report.html
+ echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY
+
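+  # The jobs below mostly delegate to reusable workflows; each receives the JSON
+  # run configuration produced by RunConfig through its `data` input.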
+ BuildDockers:
+ needs: [RunConfig]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/docker_test_images.yml
+ secrets: inherit
+ with:
+ data: ${{ needs.RunConfig.outputs.data }}
+ CompatibilityCheckX86:
+ needs: [RunConfig, BuilderDebRelease]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Compatibility check (release)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ CompatibilityCheckAarch64:
+ needs: [RunConfig, BuilderDebAarch64]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Compatibility check (aarch64)
+ runner_type: altinity-func-tester-aarch64
+ data: ${{ needs.RunConfig.outputs.data }}
+#########################################################################################
+#################################### ORDINARY BUILDS ####################################
+#########################################################################################
+ BuilderDebRelease:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ secrets: inherit
+ with:
+ build_name: package_release
+ checkout_depth: 0
+ data: ${{ needs.RunConfig.outputs.data }}
+ # always rebuild on release branches to be able to publish from any commit
+ force: true
+ BuilderDebAarch64:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ secrets: inherit
+ with:
+ build_name: package_aarch64
+ checkout_depth: 0
+ data: ${{ needs.RunConfig.outputs.data }}
+ # always rebuild on release branches to be able to publish from any commit
+ force: true
+ runner_type: builder-aarch64
+ BuilderDebAsan:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ secrets: inherit
+ with:
+ build_name: package_asan
+ data: ${{ needs.RunConfig.outputs.data }}
+ BuilderDebUBsan:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ secrets: inherit
+ with:
+ build_name: package_ubsan
+ data: ${{ needs.RunConfig.outputs.data }}
+ BuilderDebTsan:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ secrets: inherit
+ with:
+ build_name: package_tsan
+ data: ${{ needs.RunConfig.outputs.data }}
+ BuilderDebMsan:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ secrets: inherit
+ with:
+ build_name: package_msan
+ data: ${{ needs.RunConfig.outputs.data }}
+ BuilderDebDebug:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ secrets: inherit
+ with:
+ build_name: package_debug
+ data: ${{ needs.RunConfig.outputs.data }}
+ force: true
+############################################################################################
+##################################### Docker images #######################################
+############################################################################################
+ DockerServerImage:
+ needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Docker server image
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ DockerKeeperImage:
+ needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Docker keeper image
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
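+  # Scan the freshly built server and keeper images (including the -alpine server
+  # variant) for known vulnerabilities via the reusable grype_scan workflow.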
+ GrypeScan:
+ needs: [RunConfig, DockerServerImage, DockerKeeperImage]
+ if: ${{ !failure() && !cancelled() }}
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - image: server
+ suffix: ''
+ - image: server
+ suffix: '-alpine'
+ - image: keeper
+ suffix: ''
+ uses: ./.github/workflows/grype_scan.yml
+ secrets: inherit
+ with:
+ docker_image: altinityinfra/clickhouse-${{ matrix.image }}
+ version: ${{ fromJson(needs.RunConfig.outputs.data).version }}
+ tag-suffix: ${{ matrix.suffix }}
+############################################################################################
+##################################### BUILD REPORTER #######################################
+############################################################################################
+ Builds_Report:
+    # run the report check even if some builds failed, to surface the CI error
+ if: ${{ !cancelled() && needs.RunConfig.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Builds') }}
+ needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64, BuilderDebAsan, BuilderDebUBsan, BuilderDebMsan, BuilderDebTsan, BuilderDebDebug]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ steps:
+ - name: Check out repository code
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
+ - name: Download reports
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(needs.RunConfig.outputs.data) }} --pre --job-name Builds
+ - name: Builds report
+ run: |
+ cd "$GITHUB_WORKSPACE/tests/ci"
+ python3 ./build_report_check.py --reports package_release package_aarch64 package_asan package_msan package_ubsan package_tsan package_debug
+ - name: Set status
+ # NOTE(vnemkov): generate and upload the report even if previous step failed
+ if: success() || failure()
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(needs.RunConfig.outputs.data) }} --post --job-name Builds
+ MarkReleaseReady:
+ if: ${{ !failure() && !cancelled() }}
+ needs:
+ - BuilderDebRelease
+ - BuilderDebAarch64
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ steps:
+ - name: Debug
+ run: |
+ echo need with different filters
+ cat << 'EOF'
+ ${{ toJSON(needs) }}
+ ${{ toJSON(needs.*.result) }}
+ no failures ${{ !contains(needs.*.result, 'failure') }}
+ no skips ${{ !contains(needs.*.result, 'skipped') }}
+ no both ${{ !(contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
+ EOF
+ - name: Not ready
+        # fail the job to be able to restart it
+ if: ${{ contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure') }}
+ run: exit 1
+ - name: Check out repository code
+ if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
+ - name: Mark Commit Release Ready
+ if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
+ run: |
+ cd "$GITHUB_WORKSPACE/tests/ci"
+ python3 mark_release_ready.py
+############################################################################################
+#################################### INSTALL PACKAGES ######################################
+############################################################################################
+ InstallPackagesTestRelease:
+ needs: [RunConfig, BuilderDebRelease]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Install packages (release)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ run_command: |
+ python3 install_check.py "$CHECK_NAME"
+ InstallPackagesTestAarch64:
+ needs: [RunConfig, BuilderDebAarch64]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Install packages (aarch64)
+ runner_type: altinity-func-tester-aarch64
+ data: ${{ needs.RunConfig.outputs.data }}
+ run_command: |
+ python3 install_check.py "$CHECK_NAME"
+##############################################################################################
+########################### FUNCTIONAL STATELESS TESTS #######################################
+##############################################################################################
+ FunctionalStatelessTestRelease:
+ needs: [RunConfig, BuilderDebRelease]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Stateless tests (release)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ FunctionalStatelessTestAarch64:
+ needs: [RunConfig, BuilderDebAarch64]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Stateless tests (aarch64)
+ runner_type: altinity-func-tester-aarch64
+ data: ${{ needs.RunConfig.outputs.data }}
+ FunctionalStatelessTestAsan:
+ needs: [RunConfig, BuilderDebAsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Stateless tests (asan)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ FunctionalStatelessTestTsan:
+ needs: [RunConfig, BuilderDebTsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Stateless tests (tsan)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ FunctionalStatelessTestMsan:
+ needs: [RunConfig, BuilderDebMsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Stateless tests (msan)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ FunctionalStatelessTestUBsan:
+ needs: [RunConfig, BuilderDebUBsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Stateless tests (ubsan)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ FunctionalStatelessTestDebug:
+ needs: [RunConfig, BuilderDebDebug]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Stateless tests (debug)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+##############################################################################################
+######################################### STRESS TESTS #######################################
+##############################################################################################
+ StressTestAsan:
+ needs: [RunConfig, BuilderDebAsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Stress test (asan)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ StressTestTsan:
+ needs: [RunConfig, BuilderDebTsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Stress test (tsan)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ StressTestMsan:
+ needs: [RunConfig, BuilderDebMsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Stress test (msan)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ StressTestUBsan:
+ needs: [RunConfig, BuilderDebUBsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Stress test (ubsan)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ StressTestDebug:
+ needs: [RunConfig, BuilderDebDebug]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Stress test (debug)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+#############################################################################################
+############################# INTEGRATION TESTS #############################################
+#############################################################################################
+ IntegrationTestsAsan:
+ needs: [RunConfig, BuilderDebAsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Integration tests (asan)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ IntegrationTestsAnalyzerAsan:
+ needs: [RunConfig, BuilderDebAsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Integration tests (asan, old analyzer)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ IntegrationTestsTsan:
+ needs: [RunConfig, BuilderDebTsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Integration tests (tsan)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ IntegrationTestsRelease:
+ needs: [RunConfig, BuilderDebRelease]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Integration tests (release)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+#############################################################################################
+##################################### REGRESSION TESTS ######################################
+#############################################################################################
+ RegressionTestsRelease:
+ needs: [RunConfig, BuilderDebRelease]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.RunConfig.outputs.data).ci_settings.exclude_keywords, 'regression')}}
+ uses: ./.github/workflows/regression.yml
+ secrets: inherit
+ with:
+ runner_type: altinity-on-demand, altinity-regression-tester
+ commit: 6e3e6460a0fef691382205e268f2f76df1170c88
+ arch: release
+ build_sha: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ timeout_minutes: 300
+ RegressionTestsAarch64:
+ needs: [RunConfig, BuilderDebAarch64]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.RunConfig.outputs.data).ci_settings.exclude_keywords, 'regression') && !contains(fromJson(needs.RunConfig.outputs.data).ci_settings.exclude_keywords, 'aarch64')}}
+ uses: ./.github/workflows/regression.yml
+ secrets: inherit
+ with:
+ runner_type: altinity-on-demand, altinity-regression-tester-aarch64
+ commit: 6e3e6460a0fef691382205e268f2f76df1170c88
+ arch: aarch64
+ build_sha: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ timeout_minutes: 300
+ SignRelease:
+ needs: [RunConfig, BuilderDebRelease]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_sign.yml
+ secrets: inherit
+ with:
+ test_name: Sign release
+ runner_type: altinity-style-checker
+ data: ${{ needs.RunConfig.outputs.data }}
+ SignAarch64:
+ needs: [RunConfig, BuilderDebAarch64]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_sign.yml
+ secrets: inherit
+ with:
+ test_name: Sign aarch64
+ runner_type: altinity-style-checker-aarch64
+ data: ${{ needs.RunConfig.outputs.data }}
+ FinishCheck:
+ if: ${{ !cancelled() }}
+ needs:
+ - RunConfig
+ - DockerServerImage
+ - DockerKeeperImage
+ - Builds_Report
+ - MarkReleaseReady
+ - FunctionalStatelessTestDebug
+ - FunctionalStatelessTestRelease
+ - FunctionalStatelessTestAarch64
+ - FunctionalStatelessTestAsan
+ - FunctionalStatelessTestTsan
+ - FunctionalStatelessTestMsan
+ - FunctionalStatelessTestUBsan
+ - StressTestDebug
+ - StressTestAsan
+ - StressTestTsan
+ - StressTestMsan
+ - StressTestUBsan
+ - IntegrationTestsAsan
+ - IntegrationTestsTsan
+ - IntegrationTestsRelease
+ - CompatibilityCheckX86
+ - CompatibilityCheckAarch64
+ - RegressionTestsRelease
+ - RegressionTestsAarch64
+ - GrypeScan
+ - SignRelease
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ steps:
+ - name: Check out repository code
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
+ with:
+ clear-repository: true
+ - name: Finish label
+ if: ${{ !failure() }}
+ run: |
+ # update overall ci report
+ python3 ./tests/ci/finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+ - name: Check Workflow results
+ if: ${{ !cancelled() }}
+ run: |
+ export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+ cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
+ ${{ toJson(needs) }}
+ EOF
+ python3 ./tests/ci/ci_buddy.py --check-wf-status
+ - name: Finalize workflow report
+ if: ${{ !cancelled() }}
+ uses: ./.github/actions/create_workflow_report
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }}
+ CHECKS_DATABASE_USER: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }}
+ CHECKS_DATABASE_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }}
+ with:
+ final: true
diff --git a/.github/workflows/repo-sanity-checks.yml b/.github/workflows/repo-sanity-checks.yml
new file mode 100644
index 000000000000..ec50a056b730
--- /dev/null
+++ b/.github/workflows/repo-sanity-checks.yml
@@ -0,0 +1,150 @@
+name: Repository Sanity Checks
+
+on:
+ workflow_dispatch: # Manual trigger only
+
+ workflow_call:
+
+jobs:
+ sanity-checks:
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
+ strategy:
+ fail-fast: false # Continue with other combinations if one fails
+ matrix:
+ include:
+ # Production packages
+ - env: prod
+ type: deb
+ base: ubuntu:22.04
+ repo_url: https://builds.altinity.cloud/apt-repo
+ - env: prod
+ type: rpm
+ base: centos:8
+ repo_url: https://builds.altinity.cloud/yum-repo
+ # FIPS Production packages
+ - env: prod-fips
+ type: deb
+ base: ubuntu:22.04
+ repo_url: https://builds.altinity.cloud/fips-apt-repo
+ - env: prod-fips
+ type: rpm
+ base: centos:8
+ repo_url: https://builds.altinity.cloud/fips-yum-repo
+ # Staging packages
+ - env: staging
+ type: deb
+ base: ubuntu:22.04
+ repo_url: https://builds.staging.altinity.cloud/apt-repo
+ - env: staging
+ type: rpm
+ base: centos:8
+ repo_url: https://builds.staging.altinity.cloud/yum-repo
+ # FIPS Staging packages
+ - env: staging-fips
+ type: deb
+ base: ubuntu:22.04
+ repo_url: https://builds.staging.altinity.cloud/fips-apt-repo
+ - env: staging-fips
+ type: rpm
+ base: centos:8
+ repo_url: https://builds.staging.altinity.cloud/fips-yum-repo
+ # Hotfix packages
+ - env: hotfix
+ type: deb
+ base: ubuntu:22.04
+ repo_url: https://builds.altinity.cloud/hotfix-apt-repo
+ - env: hotfix
+ type: rpm
+ base: centos:8
+ repo_url: https://builds.altinity.cloud/hotfix-yum-repo
+ # Antalya experimental packages
+ - env: antalya
+ type: deb
+ base: ubuntu:22.04
+ repo_url: https://builds.altinity.cloud/antalya-apt-repo
+ - env: antalya
+ type: rpm
+ base: centos:8
+ repo_url: https://builds.altinity.cloud/antalya-yum-repo
+ # Hotfix staging packages
+ - env: hotfix-staging
+ type: deb
+ base: ubuntu:22.04
+ repo_url: https://builds.staging.altinity.cloud/hotfix-apt-repo
+ - env: hotfix-staging
+ type: rpm
+ base: centos:8
+ repo_url: https://builds.staging.altinity.cloud/hotfix-yum-repo
+ # Antalya experimental staging packages
+ - env: antalya-staging
+ type: deb
+ base: ubuntu:22.04
+ repo_url: https://builds.staging.altinity.cloud/antalya-apt-repo
+ - env: antalya-staging
+ type: rpm
+ base: centos:8
+ repo_url: https://builds.staging.altinity.cloud/antalya-yum-repo
+
+ steps:
+ - name: Run sanity check
+ run: |
+ cat << 'EOF' > sanity.sh
+ #!/bin/bash
+ set -e -x
+
+ # Package installation commands based on type
+ if [ "${{ matrix.type }}" = "deb" ]; then
+ export DEBIAN_FRONTEND=noninteractive
+ apt-get update && apt-get install -y apt-transport-https ca-certificates curl gnupg2 dialog sudo
+ mkdir -p /usr/share/keyrings
+ curl -s "${REPO_URL}/pubkey.gpg" | gpg --dearmor > /usr/share/keyrings/altinity-archive-keyring.gpg
+ echo "deb [signed-by=/usr/share/keyrings/altinity-archive-keyring.gpg] ${REPO_URL} stable main" > /etc/apt/sources.list.d/altinity.list
+ apt-get update
+ apt-get install -y clickhouse-server clickhouse-client
+ else
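+            # CentOS 8 mirrors are EOL; repoint yum at vault.centos.org so packages can still be installed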
+ sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-*
+ sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*
+ yum install -y curl gnupg2 sudo
+ if [[ "${{ matrix.env }}" == *"staging"* ]]; then
+ curl "${REPO_URL}/altinity-staging.repo" -o /etc/yum.repos.d/altinity-staging.repo
+ else
+ curl "${REPO_URL}/altinity.repo" -o /etc/yum.repos.d/altinity.repo
+ fi
+ yum install -y clickhouse-server clickhouse-client
+ fi
+
+ # Ensure correct ownership
+ chown -R clickhouse /var/lib/clickhouse/
+ chown -R clickhouse /var/log/clickhouse-server/
+
+ # Check server version
+ server_version=$(clickhouse-server --version)
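+          # "|| FAILED_SERVER=true" records the failure instead of aborting under "set -e", so the client check below still runs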
+ echo "$server_version" | grep "altinity" || FAILED_SERVER=true
+
+ # Start server and test
+ sudo -u clickhouse clickhouse-server --config-file /etc/clickhouse-server/config.xml --daemon
+ sleep 10
+ clickhouse-client -q 'SELECT 1'
+
+ # Check client version
+ client_version=$(clickhouse-client --version)
+ echo "$client_version" | grep "altinity" || FAILED_CLIENT=true
+
+ # Report results
+ if [ "$FAILED_SERVER" = true ]; then
+ echo "::error::Server check failed - Version: $server_version"
+ exit 1
+ elif [ "$FAILED_CLIENT" = true ]; then
+ echo "::error::Client check failed - Version: $client_version"
+ exit 1
+ else
+ echo "All checks passed successfully!"
+ fi
+ EOF
+
+ chmod +x sanity.sh
+ docker run --rm \
+ -v $(pwd)/sanity.sh:/sanity.sh \
+ -e REPO_URL="${{ matrix.repo_url }}" \
+ ${{ matrix.base }} \
+ /sanity.sh
diff --git a/.github/workflows/reusable_build.yml b/.github/workflows/reusable_build.yml
index 0256ea4bde0d..fd2e44fc16ca 100644
--- a/.github/workflows/reusable_build.yml
+++ b/.github/workflows/reusable_build.yml
@@ -1,9 +1,14 @@
-### For the pure soul wishes to move it to another place
-# https://github.com/orgs/community/discussions/9050
-
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }}
+ CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }}
+ CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }}
+ DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
+ ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }}
name: Build ClickHouse
'on':
@@ -34,12 +39,15 @@ name: Build ClickHouse
description: additional ENV variables to setup the job
type: string
secrets:
- robot_git_token:
- required: false
- ci_db_url:
- required: false
- ci_db_password:
+ secret_envs:
+ description: if given, it's passed to the environments
required: false
+ AWS_SECRET_ACCESS_KEY:
+ description: the access key to the aws param store.
+ required: true
+ AWS_ACCESS_KEY_ID:
+ description: the access key id to the aws param store.
+ required: true
jobs:
Build:
@@ -47,10 +55,10 @@ jobs:
if: ${{ contains(fromJson(inputs.data).jobs_data.jobs_to_do, inputs.build_name) || inputs.force }}
env:
GITHUB_JOB_OVERRIDDEN: Build-${{inputs.build_name}}
- runs-on: [self-hosted, '${{inputs.runner_type}}']
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
steps:
- name: Check out repository code
- uses: ClickHouse/checkout@v1
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
with:
clear-repository: true
ref: ${{ fromJson(inputs.data).git_ref }}
@@ -61,18 +69,10 @@ jobs:
run: |
cat >> "$GITHUB_ENV" << 'EOF'
${{inputs.additional_envs}}
+ ${{secrets.secret_envs}}
DOCKER_TAG<> "$GITHUB_ENV"
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
@@ -90,6 +90,11 @@ jobs:
uses: ./.github/actions/common_setup
with:
job_type: build_check
+ - name: Create source tar
+ run: |
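+          # Snapshot the checked-out source tree into TEMP_PATH (build_source.src.tar.gz) and unpack a working copy under TEMP_PATH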
+ mkdir -p "$TEMP_PATH/build_check/package_release"
+ cd .. && tar czf $TEMP_PATH/build_source.src.tar.gz ClickHouse/
+ cd $TEMP_PATH && tar xvzf $TEMP_PATH/build_source.src.tar.gz
- name: Pre
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --pre --job-name '${{inputs.build_name}}'
@@ -109,6 +114,11 @@ jobs:
if: ${{ !cancelled() }}
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --mark-success --job-name '${{inputs.build_name}}'
+ - name: Upload json report
+ uses: actions/upload-artifact@v4
+ with:
+ path: ${{ env.TEMP_PATH }}/build_report_*.json
+ name: build_report_${{inputs.build_name}}
- name: Clean
if: always()
uses: ./.github/actions/clean
diff --git a/.github/workflows/reusable_sign.yml b/.github/workflows/reusable_sign.yml
new file mode 100644
index 000000000000..7bfed2758359
--- /dev/null
+++ b/.github/workflows/reusable_sign.yml
@@ -0,0 +1,166 @@
+name: Signing workflow
+'on':
+ workflow_call:
+ inputs:
+ test_name:
+ description: the value of test type from tests/ci/ci_config.py, ends up as $CHECK_NAME ENV
+ required: true
+ type: string
+ runner_type:
+ description: the label of runner to use
+ required: true
+ type: string
+ run_command:
+ description: the command to launch the check
+ default: ""
+ required: false
+ type: string
+ checkout_depth:
+ description: the value of the git shallow checkout
+ required: false
+ type: number
+ default: 1
+ submodules:
+ description: if the submodules should be checked out
+ required: false
+ type: boolean
+ default: false
+ additional_envs:
+ description: additional ENV variables to setup the job
+ type: string
+ data:
+ description: ci data
+ type: string
+ required: true
+ working-directory:
+ description: sets custom working directory
+ type: string
+ default: "$GITHUB_WORKSPACE/tests/ci"
+ secrets:
+ secret_envs:
+ description: if given, it's passed to the environments
+ required: false
+ AWS_SECRET_ACCESS_KEY:
+ description: the access key to the aws param store.
+ required: true
+ AWS_ACCESS_KEY_ID:
+ description: the access key id to the aws param store.
+ required: true
+ GPG_BINARY_SIGNING_KEY:
+ description: gpg signing key for packages.
+ required: true
+ GPG_BINARY_SIGNING_PASSPHRASE:
+ description: gpg signing key passphrase.
+ required: true
+
+env:
+ # Force the stdout and stderr streams to be unbuffered
+ PYTHONUNBUFFERED: 1
+ CHECK_NAME: ${{inputs.test_name}}
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }}
+ CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }}
+ CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }}
+ DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
+ ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }}
+
+jobs:
+ runner_labels_setup:
+ name: Compute proper runner labels for the rest of the jobs
+ runs-on: ubuntu-latest
+ outputs:
+ runner_labels: ${{ steps.setVariables.outputs.runner_labels }}
+ steps:
+ - id: setVariables
+ name: Prepare runner_labels variables for the later steps
+ run: |
+
+ # Prepend self-hosted
+ input="self-hosted, altinity-on-demand, ${input}"
+
+ # Remove all whitespace
+ input="$(echo ${input} | tr -d [:space:])"
+ # Make something like a JSON array from comma-separated list
+ input="[ '${input//\,/\'\, \'}' ]"
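+          # e.g. "altinity-style-checker" becomes [ 'self-hosted', 'altinity-on-demand', 'altinity-style-checker' ]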
+
+ echo "runner_labels=$input" >> ${GITHUB_OUTPUT}
+ env:
+ input: ${{ inputs.runner_type }}
+
+ Test:
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ name: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }}
+ env:
+ GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }}
+ strategy:
+ fail-fast: false # we always wait for entire matrix
+ matrix:
+ batch: ${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].batches }}
+ steps:
+ - name: Check out repository code
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
+ with:
+ clear-repository: true
+ ref: ${{ fromJson(inputs.data).git_ref }}
+ submodules: ${{inputs.submodules}}
+ fetch-depth: ${{inputs.checkout_depth}}
+ filter: tree:0
+ - name: Set build envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ CHECK_NAME=${{ inputs.test_name }}
+ ${{inputs.additional_envs}}
+ ${{secrets.secret_envs}}
+ DOCKER_TAG< 1 }}
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ RUN_BY_HASH_NUM=${{matrix.batch}}
+ RUN_BY_HASH_TOTAL=${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches }}
+ EOF
+ - name: Pre run
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --pre --job-name '${{inputs.test_name}}'
+ - name: Sign release
+ env:
+ GPG_BINARY_SIGNING_KEY: ${{ secrets.GPG_BINARY_SIGNING_KEY }}
+ GPG_BINARY_SIGNING_PASSPHRASE: ${{ secrets.GPG_BINARY_SIGNING_PASSPHRASE }}
+ run: |
+ cd "${{ inputs.working-directory }}"
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" \
+ --infile ${{ toJson(inputs.data) }} \
+ --job-name '${{inputs.test_name}}' \
+ --run \
+ --force \
+ --run-command '''python3 sign_release.py'''
+ - name: Post run
+ if: ${{ !cancelled() }}
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --post --job-name '${{inputs.test_name}}'
+ - name: Mark as done
+ if: ${{ !cancelled() }}
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --mark-success --job-name '${{inputs.test_name}}' --batch ${{matrix.batch}}
+ - name: Upload signed hashes
+ uses: actions/upload-artifact@v4
+ with:
+ name: ${{inputs.test_name}} signed-hashes
+ path: ${{ env.TEMP_PATH }}/*.gpg
+ - name: Clean
+ if: always()
+ uses: ./.github/actions/clean
diff --git a/.github/workflows/reusable_simple_job.yml b/.github/workflows/reusable_simple_job.yml
index 247569c4f527..c13b6c88027e 100644
--- a/.github/workflows/reusable_simple_job.yml
+++ b/.github/workflows/reusable_simple_job.yml
@@ -66,7 +66,7 @@ jobs:
GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}
steps:
- name: Check out repository code
- uses: ClickHouse/checkout@v1
+        uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
with:
clear-repository: true
ref: ${{ inputs.git_ref }}
diff --git a/.github/workflows/reusable_test.yml b/.github/workflows/reusable_test.yml
index e896239a5c30..5ff9ced10662 100644
--- a/.github/workflows/reusable_test.yml
+++ b/.github/workflows/reusable_test.yml
@@ -40,23 +40,79 @@ name: Testing workflow
type: string
default: "$GITHUB_WORKSPACE/tests/ci"
secrets:
- robot_git_token:
+ secret_envs:
+ description: if given, it's passed to the environments
required: false
- ci_db_url:
+ AWS_SECRET_ACCESS_KEY:
+ description: the access key to the aws s3 bucket.
+ required: true
+ AWS_ACCESS_KEY_ID:
+ description: the access key id to the aws s3 bucket.
+ required: true
+ CLICKHOUSE_TEST_STAT_LOGIN:
+ description: username for ci db.
+ required: true
+ CLICKHOUSE_TEST_STAT_PASSWORD:
+ description: password for ci db.
+ required: true
+ CLICKHOUSE_TEST_STAT_URL:
+ description: url for ci db.
+ required: true
+ DOCKER_PASSWORD:
+ description: token to upload docker images.
+ required: true
+ ROBOT_TOKEN:
+ description: token to update ci status.
+ required: true
+ AZURE_ACCOUNT_NAME:
+ description: Azure storage account name
required: false
- ci_db_password:
+ AZURE_STORAGE_KEY:
+ description: Azure storage access key
+ required: false
+ AZURE_CONTAINER_NAME:
+ description: Azure container name
required: false
-
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
CHECK_NAME: ${{inputs.test_name}}
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }}
+ CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }}
+ CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }}
+ DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
+ ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }}
jobs:
+ runner_labels_setup:
+ name: Compute proper runner labels for the rest of the jobs
+ runs-on: ubuntu-latest
+ outputs:
+ runner_labels: ${{ steps.setVariables.outputs.runner_labels }}
+ steps:
+ - id: setVariables
+ name: Prepare runner_labels variables for the later steps
+ run: |
+
+ # Prepend self-hosted
+ input="self-hosted, altinity-on-demand, ${input}"
+
+ # Remove all whitespace
+ input="$(echo ${input} | tr -d [:space:])"
+ # Make something like a JSON array from comma-separated list
+ input="[ '${input//\,/\'\, \'}' ]"
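+          # e.g. runner_type "foo, bar" becomes [ 'self-hosted', 'altinity-on-demand', 'foo', 'bar' ]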
+
+ echo "runner_labels=$input" >> ${GITHUB_OUTPUT}
+ env:
+ input: ${{ inputs.runner_type }}
+
Test:
- runs-on: [self-hosted, '${{inputs.runner_type}}']
- if: ${{ !failure() && !cancelled() && contains(fromJson(inputs.data).jobs_data.jobs_to_do, inputs.test_name) }}
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
name: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }}
env:
GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }}
@@ -66,7 +122,7 @@ jobs:
batch: ${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].batches }}
steps:
- name: Check out repository code
- uses: ClickHouse/checkout@v1
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
with:
clear-repository: true
ref: ${{ fromJson(inputs.data).git_ref }}
@@ -78,23 +134,28 @@ jobs:
cat >> "$GITHUB_ENV" << 'EOF'
CHECK_NAME=${{ inputs.test_name }}
${{inputs.additional_envs}}
+ ${{secrets.secret_envs}}
DOCKER_TAG<> "$GITHUB_ENV"
+ echo "AZURE_STORAGE_KEY=${{ secrets.AZURE_STORAGE_KEY }}" >> "$GITHUB_ENV"
+ echo "AZURE_CONTAINER_NAME=${{ secrets.AZURE_CONTAINER_NAME }}" >> "$GITHUB_ENV"
+ echo "AZURE_STORAGE_ACCOUNT_URL=https://${{ secrets.AZURE_ACCOUNT_NAME }}.blob.core.windows.net/" >> "$GITHUB_ENV"
+ echo "Configured Azure credentials"
+ fi
- name: Common setup
uses: ./.github/actions/common_setup
with:
job_type: test
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: ${{ inputs.test_name }}
- name: Setup batch
if: ${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 }}
run: |
@@ -112,6 +173,7 @@ jobs:
--infile ${{ toJson(inputs.data) }} \
--job-name '${{inputs.test_name}}' \
--run \
+ --force \
--run-command '''${{inputs.run_command}}'''
# shellcheck disable=SC2319
echo "JOB_EXIT_CODE=$?" >> "$GITHUB_ENV"
@@ -123,6 +185,16 @@ jobs:
if: ${{ !cancelled() }}
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --mark-success --job-name '${{inputs.test_name}}' --batch ${{matrix.batch}}
+ - name: Update workflow report
+ if: ${{ !cancelled() }}
+ uses: ./.github/actions/create_workflow_report
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }}
+ CHECKS_DATABASE_USER: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }}
+ CHECKS_DATABASE_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }}
+ with:
+ final: false
- name: Clean
if: always()
uses: ./.github/actions/clean
diff --git a/.github/workflows/scheduled_runs.yml b/.github/workflows/scheduled_runs.yml
new file mode 100644
index 000000000000..dae3d1e25d9b
--- /dev/null
+++ b/.github/workflows/scheduled_runs.yml
@@ -0,0 +1,55 @@
+name: Scheduled Altinity Stable Builds
+
+on:
+ schedule:
+ - cron: '0 0 * * 6' #Weekly run for stable versions
+ - cron: '0 0 * * *' #Daily run for antalya versions
+  # Make sure that any changes to this file are actually tested with PRs
+ pull_request:
+ types:
+ - synchronize
+ - reopened
+ - opened
+ paths:
+ - '**/scheduled_runs.yml'
+
+jobs:
+ DailyRuns:
+ strategy:
+ fail-fast: false
+ matrix:
+ branch:
+ - antalya
+ name: ${{ matrix.branch }}
+ if: github.event.schedule != '0 0 * * 6'
+ runs-on: ubuntu-latest
+ steps:
+ - name: Run ${{ matrix.branch }} workflow
+ run: |
+ curl -L \
+ -X POST \
+ -H "Accept: application/vnd.github+json" \
+ -H "Authorization: Bearer ${{ secrets.TOKEN }}" \
+ -H "X-GitHub-Api-Version: 2022-11-28" \
+ https://api.github.com/repos/Altinity/ClickHouse/actions/workflows/release_branches.yml/dispatches \
+ -d '{"ref":"${{ matrix.branch }}"}'
+
+ WeeklyRuns:
+ strategy:
+ fail-fast: false
+ matrix:
+ branch:
+ - customizations/24.8.11
+ name: ${{ matrix.branch }}
+ if: github.event.schedule != '0 0 * * *'
+ runs-on: ubuntu-latest
+ steps:
+ - name: Run ${{ matrix.branch }} workflow
+ run: |
+ curl -L \
+ -X POST \
+ -H "Accept: application/vnd.github+json" \
+ -H "Authorization: Bearer ${{ secrets.TOKEN }}" \
+ -H "X-GitHub-Api-Version: 2022-11-28" \
+ https://api.github.com/repos/Altinity/ClickHouse/actions/workflows/release_branches.yml/dispatches \
+ -d '{"ref":"${{ matrix.branch }}"}'
diff --git a/.github/workflows/sign_and_release.yml b/.github/workflows/sign_and_release.yml
new file mode 100644
index 000000000000..c00dd4c9f43c
--- /dev/null
+++ b/.github/workflows/sign_and_release.yml
@@ -0,0 +1,426 @@
+name: Sign and Release packages
+
+on:
+ workflow_dispatch:
+ inputs:
+ workflow_url:
+ description: 'The URL to the workflow run that produced the packages'
+ required: true
+ release_environment:
+ description: 'The environment to release to. "staging" or "production"'
+ required: true
+ default: 'staging'
+ package_version:
+ description: 'The version of the package to release'
+ required: true
+ type: string
+ GPG_PASSPHRASE:
+ description: 'GPG passphrase for signing (required for production releases)'
+ required: false
+ type: string
+
+env:
+ AWS_REGION: us-east-1
+ S3_STORAGE_BUCKET: altinity-test-reports
+
+jobs:
+ extract-package-info:
+    runs-on: [self-hosted, altinity-style-checker-aarch64]
+ steps:
+ - name: Download artifact "build_report_package_release"
+ run: gh run download "$(echo "${{ inputs.workflow_url }}" | awk -F'/' '{print $NF}')" -n build_report_package_release
+
+ - name: Unzip Artifact
+ run: |
+ # Locate the downloaded zip file.
+ ZIP_FILE=$(ls | grep "build_report_package_release.*\.zip" | head -n 1)
+ if [ -z "$ZIP_FILE" ]; then
+ echo "No zip file found for the artifact."
+ exit 1
+ fi
+ echo "Found zip file: ${ZIP_FILE}"
+ unzip -o "$ZIP_FILE" -d artifact
+
+ - name: Extract JSON File
+ run: |
+ cd artifact
+ # Find the JSON file with a name like build_report...package_release.json
+ JSON_FILE=$(ls | grep "build_report.*package_release\.json" | head -n 1)
+ if [ -z "$JSON_FILE" ]; then
+ echo "No JSON file matching the pattern was found."
+ exit 1
+ fi
+ echo "Found JSON file: ${JSON_FILE}"
+
+ - name: Parse JSON file
+ run: |
+ # Use jq to select the URL that ends with clickhouse-client-*-amd64.tgz
+ CLIENT_URL=$(jq -r '.build_urls[] | select(test("clickhouse-client-.*-amd64\\.tgz$"))' "$JSON_FILE")
+ if [ -z "$CLIENT_URL" ]; then
+ echo "No matching client URL found in JSON."
+ exit 1
+ fi
+ echo "Found client URL: ${CLIENT_URL}"
+
+ - name: Extract information
+ run: |
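+          # CLIENT_URL is expected to look like https://s3.amazonaws.com/<bucket>/<...4 path segments...>/clickhouse-client-<version>-amd64.tgz; the captured segments are decoded in the "Process information" step below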
+ if ! [[ "$CLIENT_URL" =~ https://s3\.amazonaws\.com/([^/]+)/([^/]+)/([^/]+)/([^/]+)/([^/]+)/clickhouse-client-([^-]+)-amd64\.tgz$ ]]; then
+ echo "The client URL did not match the expected pattern."
+ exit 1
+ fi
+
+ - name: Process information
+ run: |
+ SRC_BUCKET="${BASH_REMATCH[1]}"
+ PACKAGE_VERSION="${BASH_REMATCH[6]}"
+ FOLDER_TIME=$(date -u +"%Y-%m-%dT%H-%M-%S.%3N")
+
+ if [[ "${BASH_REMATCH[2]}" == "PRs" ]]; then
+ SRC_DIR="${BASH_REMATCH[2]}/${BASH_REMATCH[3]}"
+ COMMIT_HASH="${BASH_REMATCH[4]}"
+ PR_NUMBER="${BASH_REMATCH[3]}"
+ DOCKER_VERSION="${PR_NUMBER}"
+ TEST_RESULTS_SRC="${PR_NUMBER}"
+ else
+ SRC_DIR="${BASH_REMATCH[2]}"
+ COMMIT_HASH="${BASH_REMATCH[3]}"
+ DOCKER_VERSION="0"
+ TEST_RESULTS_SRC="0"
+ fi
+
+ - name: Verify package version
+ run: |
+ if [ "$PACKAGE_VERSION" != "${{ inputs.package_version }}" ]; then
+ echo "Error: Extracted package version ($PACKAGE_VERSION) does not match input package version (${{ inputs.package_version }})"
+ exit 1
+ fi
+
+ - name: Extract major version and determine if binary processing is needed
+ run: |
+ MAJOR_VERSION=$(echo "$PACKAGE_VERSION" | cut -d. -f1)
+ if [ "$MAJOR_VERSION" -ge 24 ]; then
+ NEEDS_BINARY_PROCESSING="true"
+ else
+ NEEDS_BINARY_PROCESSING="false"
+ fi
+
+ - name: Extract feature and set repo prefix
+ run: |
+ # Extract the feature from PACKAGE_VERSION (everything after the last dot)
+ ALTINITY_BUILD_FEATURE=$(echo "$PACKAGE_VERSION" | rev | cut -d. -f1 | rev)
+ echo "ALTINITY_BUILD_FEATURE=${ALTINITY_BUILD_FEATURE}" >> $GITHUB_ENV
+
+ # Set REPO_PREFIX based on the feature
+ case "$ALTINITY_BUILD_FEATURE" in
+ "altinityhotfix")
+ echo "REPO_PREFIX=hotfix-" >> $GITHUB_ENV
+ ;;
+ "altinityfips")
+ echo "REPO_PREFIX=fips-" >> $GITHUB_ENV
+ ;;
+ "altinityantalya")
+ echo "REPO_PREFIX=antalya-" >> $GITHUB_ENV
+ ;;
+ "altinitystable"|"altinitytest")
+ echo "REPO_PREFIX=" >> $GITHUB_ENV
+ ;;
+ *)
+ echo "Build feature not supported: ${ALTINITY_BUILD_FEATURE}"
+ exit 1
+ ;;
+ esac
+
+ - name: Check extracted information
+ run: |
+ echo "Extracted information:"
+ echo "altinity_build_feature: ${ALTINITY_BUILD_FEATURE}"
+ echo "commit_hash: ${COMMIT_HASH}"
+ echo "docker_version: ${DOCKER_VERSION}"
+ echo "folder_time: ${FOLDER_TIME}"
+ echo "major_version: ${MAJOR_VERSION}"
+ echo "needs_binary_processing: ${NEEDS_BINARY_PROCESSING}"
+ echo "package_version: ${PACKAGE_VERSION}"
+ echo "repo_prefix: ${REPO_PREFIX}"
+ echo "src_bucket: ${SRC_BUCKET}"
+ echo "src_dir: ${SRC_DIR}"
+ echo "test_results_src: ${TEST_RESULTS_SRC}"
+
+ - name: Set environment variables
+ run: |
+ # Set environment variables for use in subsequent jobs
+ echo "COMMIT_HASH=${COMMIT_HASH}" >> $GITHUB_ENV
+ echo "DOCKER_VERSION=${DOCKER_VERSION}" >> $GITHUB_ENV
+ echo "FOLDER_TIME=${FOLDER_TIME}" >> $GITHUB_ENV
+ echo "NEEDS_BINARY_PROCESSING=${NEEDS_BINARY_PROCESSING}" >> $GITHUB_ENV
+ echo "PACKAGE_VERSION=${PACKAGE_VERSION}" >> $GITHUB_ENV
+ echo "SRC_BUCKET=${SRC_BUCKET}" >> $GITHUB_ENV
+ echo "SRC_DIR=${SRC_DIR}" >> $GITHUB_ENV
+ echo "TEST_RESULTS_SRC=${TEST_RESULTS_SRC}" >> $GITHUB_ENV
+ echo "SRC_URL=s3://${SRC_BUCKET}/${SRC_DIR}/${COMMIT_HASH}" >> $GITHUB_ENV
+ echo "DEST_URL=s3://${S3_STORAGE_BUCKET}/builds/stable/v${PACKAGE_VERSION}/${FOLDER_TIME}" >> $GITHUB_ENV
+
+ copy-packages:
+ needs: extract-package-info
+    runs-on: [self-hosted, altinity-style-checker]
+ env:
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ steps:
+ - name: Download signed hash artifacts
+ run: |
+ # Download both signed hash artifacts
+ gh run download "$(echo "${{ inputs.workflow_url }}" | awk -F'/' '{print $NF}')" -n "Sign release signed-hashes"
+ gh run download "$(echo "${{ inputs.workflow_url }}" | awk -F'/' '{print $NF}')" -n "Sign aarch64 signed-hashes"
+
+ # Unzip both artifacts
+ for zip in *signed-hashes*.zip; do
+ unzip -o "$zip" -d signed-hashes
+ done
+
+ - name: Copy ARM packages
+ run: |
+          if ! aws s3 sync "${SRC_URL}/package_aarch64/" "${DEST_URL}/packages/ARM_PACKAGES/"; then
+            echo "Failed to copy ARM packages"
+ exit 1
+ fi
+
+ - name: Verify ARM packages
+ run: |
+ cd signed-hashes
+ for file in ../${DEST_URL}/packages/ARM_PACKAGES/**/*; do
+ if [ -f "$file" ]; then
+ echo "Verifying $file..."
+ if ! gpg --verify "Sign aarch64 signed-hashes/$(basename "$file").sha256.gpg" 2>/dev/null; then
+ echo "GPG verification failed for $file"
+ exit 1
+ fi
+ if ! sha256sum -c "Sign aarch64 signed-hashes/$(basename "$file").sha256.gpg" 2>/dev/null; then
+ echo "SHA256 verification failed for $file"
+ exit 1
+ fi
+ fi
+ done
+
+ - name: Separate ARM binary
+ run: |
+ aws s3 mv "${DEST_URL}/packages/ARM_PACKAGES/clickhouse" "${DEST_URL}/packages/ARM_PACKAGES/arm-bin/clickhouse"
+ aws s3 mv "${DEST_URL}/packages/ARM_PACKAGES/clickhouse-stripped" "${DEST_URL}/packages/ARM_PACKAGES/arm-bin/clickhouse-stripped"
+
+ - name: Copy AMD packages
+ run: |
+ if ! aws s3 sync "${SRC_URL}/package_release/" "${DEST_URL}/packages/AMD_PACKAGES/"; then
+ echo "Failed to copy AMD packages"
+ exit 1
+ fi
+
+ - name: Verify AMD packages
+ run: |
+ cd signed-hashes
+ for file in ../${DEST_URL}/packages/AMD_PACKAGES/**/*; do
+ if [ -f "$file" ]; then
+ echo "Verifying $file..."
+ if ! gpg --verify "Sign release signed-hashes/$(basename "$file").sha256.gpg" 2>/dev/null; then
+ echo "GPG verification failed for $file"
+ exit 1
+ fi
+ if ! sha256sum -c "Sign release signed-hashes/$(basename "$file").sha256.gpg" 2>/dev/null; then
+ echo "SHA256 verification failed for $file"
+ exit 1
+ fi
+ fi
+ done
+
+ - name: Separate AMD binary
+ run: |
+ aws s3 mv "${DEST_URL}/packages/AMD_PACKAGES/clickhouse" "${DEST_URL}/packages/AMD_PACKAGES/amd-bin/clickhouse"
+ aws s3 mv "${DEST_URL}/packages/AMD_PACKAGES/clickhouse-stripped" "${DEST_URL}/packages/AMD_PACKAGES/amd-bin/clickhouse-stripped"
+
+ - name: Process ARM binary
+ if: ${{ env.NEEDS_BINARY_PROCESSING == 'true' }}
+ run: |
+ echo "Downloading clickhouse binary..."
+ if ! aws s3 cp "${SRC_URL}/package_release/clickhouse" clickhouse; then
+ echo "Failed to download clickhouse binary"
+ exit 1
+ fi
+ chmod +x clickhouse
+
+ echo "Running clickhouse binary..."
+ ./clickhouse -q'q'
+
+ echo "Stripping the binary..."
+ strip clickhouse -o clickhouse-stripped
+
+ echo "Uploading processed binaries..."
+ if ! aws s3 cp clickhouse "${DEST_URL}/packages/ARM_PACKAGES/arm-bin/non-self-extracting/"; then
+ echo "Failed to upload clickhouse binary"
+ exit 1
+ fi
+ if ! aws s3 cp clickhouse-stripped "${DEST_URL}/packages/ARM_PACKAGES/arm-bin/non-self-extracting/"; then
+ echo "Failed to upload stripped clickhouse binary"
+ exit 1
+ fi
+
+ copy-test-results:
+ needs: extract-package-info
+    runs-on: [self-hosted, altinity-style-checker-aarch64]
+ env:
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ steps:
+ - name: Copy test results to S3
+ run: |
+ # Copy test results
+ echo "Copying test results..."
+ if ! aws s3 sync "s3://${SRC_BUCKET}/${TEST_RESULTS_SRC}/${COMMIT_HASH}" \
+ "${DEST_URL}/test_results/"; then
+ echo "Failed to copy test results"
+ exit 1
+ fi
+
+ publish-docker:
+ needs: extract-package-info
+ strategy:
+ matrix:
+ image_type: [server, keeper]
+ variant: ['', '-alpine']
+    runs-on: [self-hosted, altinity-style-checker-aarch64]
+ env:
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ steps:
+ - name: Publish Docker Image
+ id: publish
+ uses: ./.github/workflows/docker_publish.yml
+ with:
+ docker_image: altinityinfra/clickhouse-${{ matrix.image_type }}:${{ env.DOCKER_VERSION }}-${{ env.PACKAGE_VERSION }}${{ matrix.variant }}
+ release_environment: ${{ inputs.release_environment }}
+ upload_artifacts: false
+ secrets: inherit
+
+ - name: Upload Docker images to S3
+ run: |
+ # Upload Docker images to S3
+ echo "Uploading Docker images to S3..."
+ if ! aws s3 sync "${{ steps.publish.outputs.image_archives_path }}/" \
+ "${DEST_URL}/docker_images/${{ matrix.image_type }}${{ matrix.variant }}/"; then
+ echo "Failed to upload Docker images"
+ exit 1
+ fi
+
+ sign-and-publish:
+ needs: [copy-packages]
+ runs-on: arc-runners-clickhouse-signer
+ env:
+ GPG_PASSPHRASE: ${{ inputs.release_environment == 'production' && inputs.GPG_PASSPHRASE || secrets.GPG_PASSPHRASE }}
+ REPO_DNS_NAME: ${{ inputs.release_environment == 'production' && 'builds.altinity.cloud' || 'builds.staging.altinity.cloud' }}
+ REPO_NAME: ${{ inputs.release_environment == 'production' && 'altinity' || 'altinity-staging' }}
+ REPO_SUBTITLE: ${{ inputs.release_environment == 'production' && 'Stable Builds' || 'Staging Builds' }}
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/ClickHouse
+ ref: antalya
+ path: ClickHouse
+
+ - name: Download packages
+ run: |
+ if ! aws s3 cp "${DEST_URL}/packages/ARM_PACKAGES/" $RUNNER_TEMP/packages --recursive; then
+ echo "Failed to download ARM packages"
+ exit 1
+ fi
+ if ! aws s3 cp "${DEST_URL}/packages/AMD_PACKAGES/" $RUNNER_TEMP/packages --recursive; then
+ echo "Failed to download AMD packages"
+ exit 1
+ fi
+ env:
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+
+ - name: Process ARM binary
+ if: ${{ env.NEEDS_BINARY_PROCESSING == 'true' }}
+ run: |
+ chmod +x $RUNNER_TEMP/packages/arm-bin/clickhouse
+
+ echo "Running clickhouse binary..."
+ $RUNNER_TEMP/packages/arm-bin/clickhouse -q'q'
+
+ echo "Stripping the binary..."
+ strip $RUNNER_TEMP/packages/arm-bin/non-self-extracting/clickhouse -o $RUNNER_TEMP/packages/arm-bin/non-self-extracting/clickhouse-stripped
+
+ - name: Setup GPG
+ run: |
+          if [ -z "${GPG_PASSPHRASE}" ]
+ then
+ echo "GPG_PASSPHRASE is not set"
+ exit 1
+ fi
+
+ - name: Process GPG key
+ run: |
+ echo "Processing GPG key..."
+ if ! aws secretsmanager get-secret-value --secret-id arn:aws:secretsmanager:us-east-1:446527654354:secret:altinity_staging_gpg-Rqbe8S --query SecretString --output text | sed -e "s/^'//" -e "s/'$//" | jq -r '.altinity_staging_gpg | @base64d' | gpg --batch --import; then
+ echo "Failed to import GPG key"
+ exit 1
+ fi
+ gpg --list-secret-keys --with-keygrip
+ gpgconf --kill gpg-agent
+ gpg-agent --daemon --allow-preset-passphrase
+ if ! aws ssm get-parameter --name /gitlab-runner/key-encrypting-key --with-decryption --query Parameter.Value --output text | sudo tee /root/.key-encrypting-key >/dev/null; then
+ echo "Failed to get key encrypting key"
+ exit 1
+ fi
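+          # Identify the imported key, decrypt its passphrase with the key-encrypting key, and preset it into gpg-agent so later signing runs without prompting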
+ GPG_KEY_NAME=$(gpg --list-secret-keys | grep uid | head --lines 1 | tr -s " " | cut -d " " -f 4-)
+ GPG_KEY_ID=$(gpg --list-secret-keys --with-keygrip "${GPG_KEY_NAME}" | grep Keygrip | head --lines 1 | tr -s " " | cut -d " " -f 4)
+ echo "$GPG_PASSPHRASE" | base64 -d | sudo openssl enc -d -aes-256-cbc -pbkdf2 -pass file:/root/.key-encrypting-key -in - -out - | /usr/lib/gnupg/gpg-preset-passphrase --preset $GPG_KEY_ID
+
+ - name: Run Ansible playbook
+ run: |
+ echo "Running Ansible playbook for signing and publishing..."
+          echo "ansible-playbook -i ClickHouse/tests/ci/release/packaging/ansible/inventory/localhost.yml -e aws_region=$AWS_REGION -e gpg_key_id=\"$GPG_KEY_ID\" -e gpg_key_name=\"$GPG_KEY_NAME\" -e local_repo_path=\"/home/runner/.cache/${{ inputs.release_environment }}\" -e pkgver=\"${PACKAGE_VERSION}\" -e release_environment=$RELEASE_ENVIRONMENT -e repo_dns_name=$REPO_DNS_NAME -e repo_name=$REPO_NAME -e repo_prefix=\"$REPO_PREFIX\" -e repo_subtitle=\"$REPO_SUBTITLE\" -e s3_pkgs_bucket=$S3_STORAGE_BUCKET -e s3_pkgs_path=\"builds/stable/v${PACKAGE_VERSION}/${FOLDER_TIME}\" -e repo_path=\"/home/runner/.cache/${{ inputs.release_environment }}\" ClickHouse/tests/ci/release/packaging/ansible/sign-and-release.yml"
+ if ! ansible-playbook -i ClickHouse/tests/ci/release/packaging/ansible/inventory/localhost.yml \
+ -e aws_region=$AWS_REGION \
+ -e gpg_key_id="$GPG_KEY_ID" \
+ -e gpg_key_name="$GPG_KEY_NAME" \
+ -e local_repo_path="/home/runner/.cache/${{ inputs.release_environment }}" \
+ -e pkgver="${PACKAGE_VERSION}" \
+ -e release_environment=$RELEASE_ENVIRONMENT \
+ -e repo_dns_name=$REPO_DNS_NAME \
+ -e repo_name=$REPO_NAME \
+ -e repo_prefix="$REPO_PREFIX" \
+ -e repo_subtitle="$REPO_SUBTITLE" \
+ -e s3_pkgs_bucket=$S3_STORAGE_BUCKET \
+ -e s3_pkgs_path="builds/stable/v${PACKAGE_VERSION}/${FOLDER_TIME}" \
+ ClickHouse/tests/ci/release/packaging/ansible/sign-and-release.yml; then
+ echo "Ansible playbook failed"
+ exit 1
+ fi
+ gpgconf --kill gpg-agent
+ ls -hal
+
+ - name: Cleanup temporary files
+ if: always()
+ run: |
+ echo "Cleaning up temporary files..."
+ rm -f $RUNNER_TEMP/clickhouse* || true
+
+ repo-sanity-check:
+ needs: sign-and-publish
+ uses: Altinity/ClickHouse/.github/workflows/repo-sanity-checks.yml@antalya
+
+ copy-to-released:
+ needs: [sign-and-publish]
+ if: ${{ inputs.release_environment == 'production' }}
+    runs-on: [self-hosted, altinity-style-checker-aarch64]
+ env:
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ steps:
+ - name: Copy to released directory
+ run: |
+ echo "Copying to released directory..."
+ if ! aws s3 sync "${DEST_URL}/" "s3://${S3_STORAGE_BUCKET}/builds/released/v${PACKAGE_VERSION}/"; then
+ echo "Failed to copy to released directory"
+ exit 1
+ fi
diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt
index 39e0be84d22c..79a0ad713580 100644
--- a/cmake/autogenerated_versions.txt
+++ b/cmake/autogenerated_versions.txt
@@ -7,6 +7,10 @@ SET(VERSION_MAJOR 25)
SET(VERSION_MINOR 3)
SET(VERSION_PATCH 6)
SET(VERSION_GITHASH 14e08ead34a7900d75e3d378f87cabfba9f8c8d9)
-SET(VERSION_DESCRIBE v25.3.6.1-lts)
-SET(VERSION_STRING 25.3.6.1)
+#10000 for altinitystable candidates
+#20000 for altinityedge candidates
+SET(VERSION_TWEAK 10000)
+SET(VERSION_FLAVOUR altinitytest)
+SET(VERSION_DESCRIBE v25.3.6.10000.altinitytest)
+SET(VERSION_STRING 25.3.6.10000.altinitytest)
# end of autochange
diff --git a/cmake/version.cmake b/cmake/version.cmake
index 9ca21556f4d4..b008c989c0b0 100644
--- a/cmake/version.cmake
+++ b/cmake/version.cmake
@@ -3,9 +3,10 @@ include(${PROJECT_SOURCE_DIR}/cmake/autogenerated_versions.txt)
set(VERSION_EXTRA "" CACHE STRING "")
set(VERSION_TWEAK "" CACHE STRING "")
-if (VERSION_TWEAK)
- string(CONCAT VERSION_STRING ${VERSION_STRING} "." ${VERSION_TWEAK})
-endif ()
+# NOTE(vnemkov): we rely on VERSION_TWEAK portion to be already present in VERSION_STRING
+# if (VERSION_TWEAK)
+# string(CONCAT VERSION_STRING ${VERSION_STRING} "." ${VERSION_TWEAK})
+# endif ()
if (VERSION_EXTRA)
string(CONCAT VERSION_STRING ${VERSION_STRING} "." ${VERSION_EXTRA})
@@ -19,5 +20,5 @@ set (VERSION_STRING_SHORT "${VERSION_MAJOR}.${VERSION_MINOR}")
math (EXPR VERSION_INTEGER "${VERSION_PATCH} + ${VERSION_MINOR}*1000 + ${VERSION_MAJOR}*1000000")
if(CLICKHOUSE_OFFICIAL_BUILD)
- set(VERSION_OFFICIAL " (official build)")
+ set(VERSION_OFFICIAL " (altinity build)")
endif()
diff --git a/contrib/google-protobuf-cmake/CMakeLists.txt b/contrib/google-protobuf-cmake/CMakeLists.txt
index 9df9d3e00268..ea99a20c9731 100644
--- a/contrib/google-protobuf-cmake/CMakeLists.txt
+++ b/contrib/google-protobuf-cmake/CMakeLists.txt
@@ -369,6 +369,7 @@ else ()
execute_process(
COMMAND ${CMAKE_COMMAND}
"-G${CMAKE_GENERATOR}"
+ "-DCMAKE_POLICY_VERSION_MINIMUM=3.5"
"-DCMAKE_MAKE_PROGRAM=${CMAKE_MAKE_PROGRAM}"
"-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}"
"-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}"
diff --git a/contrib/grpc-cmake/CMakeLists.txt b/contrib/grpc-cmake/CMakeLists.txt
index 6dfa9a39583c..52cc8458caf5 100644
--- a/contrib/grpc-cmake/CMakeLists.txt
+++ b/contrib/grpc-cmake/CMakeLists.txt
@@ -78,6 +78,7 @@ if (NOT CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME
execute_process(
COMMAND ${CMAKE_COMMAND}
"-G${CMAKE_GENERATOR}"
+ "-DCMAKE_POLICY_VERSION_MINIMUM=3.5"
"-DCMAKE_MAKE_PROGRAM=${CMAKE_MAKE_PROGRAM}"
"-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}"
"-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}"
@@ -134,6 +135,7 @@ if (NOT CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME
execute_process(
COMMAND ${CMAKE_COMMAND}
"-G${CMAKE_GENERATOR}"
+ "-DCMAKE_POLICY_VERSION_MINIMUM=3.5"
"-DCMAKE_MAKE_PROGRAM=${CMAKE_MAKE_PROGRAM}"
"-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}"
"-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}"
diff --git a/contrib/openssl-cmake/CMakeLists.txt b/contrib/openssl-cmake/CMakeLists.txt
index 6f44a93b0300..5ffc6f2f5d2b 100644
--- a/contrib/openssl-cmake/CMakeLists.txt
+++ b/contrib/openssl-cmake/CMakeLists.txt
@@ -8,6 +8,9 @@ if(NOT ENABLE_SSL)
return()
endif()
+project(ch-openssl)
+cmake_minimum_required(VERSION 3.5)
+
# Below build description was generated from these steps:
# - Checkout OpenSSL in the desired version (e.g. 3.2)
# - Take a brief look (but not too long to save your mental sanity) at the supported build options (*)
diff --git a/contrib/sparse-checkout/update-aws.sh b/contrib/sparse-checkout/update-aws.sh
index 3b449e6729a3..19820bd8dcfa 100755
--- a/contrib/sparse-checkout/update-aws.sh
+++ b/contrib/sparse-checkout/update-aws.sh
@@ -8,6 +8,7 @@ echo '!/*/*' >> $FILES_TO_CHECKOUT
echo '/src/aws-cpp-sdk-core/*' >> $FILES_TO_CHECKOUT
echo '/generated/src/aws-cpp-sdk-s3/*' >> $FILES_TO_CHECKOUT
echo '/generated/src/aws-cpp-sdk-aws/*' >> $FILES_TO_CHECKOUT
+echo '/generated/src/aws-cpp-sdk-glue/*' >> $FILES_TO_CHECKOUT
git config core.sparsecheckout true
git checkout $1
diff --git a/docker/docs/builder/Dockerfile b/docker/docs/builder/Dockerfile
index ad69ea1aec09..c7664d54eae9 100644
--- a/docker/docs/builder/Dockerfile
+++ b/docker/docs/builder/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/docs-builder .
+# docker build -t altinityinfra/docs-builder .
FROM golang:alpine AS htmltest-builder
ARG HTMLTEST_VERSION=0.17.0
diff --git a/docker/images.json b/docker/images.json
index 942cf9606c69..91d53ea2cb0d 100644
--- a/docker/images.json
+++ b/docker/images.json
@@ -1,122 +1,121 @@
{
"docker/packager/binary-builder": {
- "name": "clickhouse/binary-builder",
+ "name": "altinityinfra/binary-builder",
"dependent": []
},
"docker/packager/cctools": {
- "name": "clickhouse/cctools",
+ "name": "altinityinfra/cctools",
"dependent": []
},
"docker/test/compatibility/centos": {
- "name": "clickhouse/test-old-centos",
+ "name": "altinityinfra/test-old-centos",
"dependent": []
},
"docker/test/compatibility/ubuntu": {
- "name": "clickhouse/test-old-ubuntu",
+ "name": "altinityinfra/test-old-ubuntu",
"dependent": []
},
"docker/test/integration/base": {
- "name": "clickhouse/integration-test",
- "dependent": [
+ "only_amd64": true,
+ "name": "altinityinfra/integration-test",
+ "dependent": [
"docker/test/integration/clickhouse_with_unity_catalog"
]
},
"docker/test/fuzzer": {
- "name": "clickhouse/fuzzer",
+ "name": "altinityinfra/fuzzer",
"dependent": []
},
"docker/test/libfuzzer": {
- "name": "clickhouse/libfuzzer",
+ "name": "altinityinfra/libfuzzer",
"dependent": []
},
"docker/test/performance-comparison": {
- "name": "clickhouse/performance-comparison",
+ "name": "altinityinfra/performance-comparison",
"dependent": []
},
"docker/test/util": {
- "name": "clickhouse/test-util",
+ "name": "altinityinfra/test-util",
"dependent": [
"docker/test/base",
"docker/test/fasttest"
]
},
"docker/test/stateless": {
- "name": "clickhouse/stateless-test",
+ "name": "altinityinfra/stateless-test",
"dependent": [
"docker/test/stateful"
]
},
"docker/test/stateful": {
- "name": "clickhouse/stateful-test",
+ "name": "altinityinfra/stateful-test",
"dependent": [
"docker/test/stress"
]
},
"docker/test/unit": {
- "name": "clickhouse/unit-test",
+ "name": "altinityinfra/unit-test",
"dependent": []
},
"docker/test/stress": {
- "name": "clickhouse/stress-test",
+ "name": "altinityinfra/stress-test",
"dependent": []
},
"docker/test/integration/runner": {
- "name": "clickhouse/integration-tests-runner",
+ "only_amd64": true,
+ "name": "altinityinfra/integration-tests-runner",
"dependent": []
},
"docker/test/fasttest": {
- "name": "clickhouse/fasttest",
+ "name": "altinityinfra/fasttest",
"dependent": [
"docker/packager/binary-builder"
]
},
- "docker/test/style": {
- "name": "clickhouse/style-test",
- "dependent": []
- },
"docker/test/integration/s3_proxy": {
- "name": "clickhouse/s3-proxy",
+ "name": "altinityinfra/s3-proxy",
"dependent": []
},
"docker/test/integration/resolver": {
- "name": "clickhouse/python-bottle",
+ "name": "altinityinfra/python-bottle",
"dependent": []
},
"docker/test/integration/helper_container": {
- "name": "clickhouse/integration-helper",
+ "only_amd64": true,
+ "name": "altinityinfra/integration-helper",
"dependent": []
},
"docker/test/integration/mysql_golang_client": {
- "name": "clickhouse/mysql-golang-client",
+ "name": "altinityinfra/mysql-golang-client",
"dependent": []
},
"docker/test/integration/dotnet_client": {
- "name": "clickhouse/dotnet-client",
+ "name": "altinityinfra/dotnet-client",
"dependent": []
},
"docker/test/integration/mysql_java_client": {
- "name": "clickhouse/mysql-java-client",
+ "name": "altinityinfra/mysql-java-client",
"dependent": []
},
"docker/test/integration/mysql_js_client": {
- "name": "clickhouse/mysql-js-client",
+ "name": "altinityinfra/mysql-js-client",
"dependent": []
},
"docker/test/integration/mysql_php_client": {
- "name": "clickhouse/mysql-php-client",
+ "name": "altinityinfra/mysql-php-client",
"dependent": []
},
"docker/test/integration/postgresql_java_client": {
- "name": "clickhouse/postgresql-java-client",
+ "name": "altinityinfra/postgresql-java-client",
"dependent": []
},
"docker/test/integration/kerberos_kdc": {
"only_amd64": true,
- "name": "clickhouse/kerberos-kdc",
+ "name": "altinityinfra/kerberos-kdc",
"dependent": []
},
"docker/test/base": {
- "name": "clickhouse/test-base",
+ "name": "altinityinfra/test-base",
"dependent": [
"docker/test/clickbench",
"docker/test/fuzzer",
@@ -131,47 +130,43 @@
]
},
"docker/test/sqlancer": {
- "name": "clickhouse/sqlancer-test",
+ "name": "altinityinfra/sqlancer-test",
"dependent": []
},
"docker/test/keeper-jepsen": {
- "name": "clickhouse/keeper-jepsen-test",
+ "name": "altinityinfra/keeper-jepsen-test",
"dependent": []
},
"docker/test/server-jepsen": {
- "name": "clickhouse/server-jepsen-test",
+ "name": "altinityinfra/server-jepsen-test",
"dependent": []
},
"docker/test/clickbench": {
- "name": "clickhouse/clickbench",
+ "name": "altinityinfra/clickbench",
"dependent": []
},
"docker/test/install/deb": {
- "name": "clickhouse/install-deb-test",
+ "name": "altinityinfra/install-deb-test",
"dependent": []
},
"docker/test/install/rpm": {
- "name": "clickhouse/install-rpm-test",
- "dependent": []
- },
- "docker/docs/builder": {
- "name": "clickhouse/docs-builder",
+ "name": "altinityinfra/install-rpm-test",
"dependent": []
},
"docker/test/sqllogic": {
- "name": "clickhouse/sqllogic-test",
+ "name": "altinityinfra/sqllogic-test",
"dependent": []
},
"docker/test/sqltest": {
- "name": "clickhouse/sqltest",
+ "name": "altinityinfra/sqltest",
"dependent": []
},
"docker/test/integration/nginx_dav": {
- "name": "clickhouse/nginx-dav",
+ "name": "altinityinfra/nginx-dav",
"dependent": []
},
"docker/test/integration/clickhouse_with_unity_catalog": {
- "name": "clickhouse/integration-test-with-unity-catalog",
+ "name": "altinityinfra/integration-test-with-unity-catalog",
"dependent": []
}
}
diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile
index ad94703045ed..8f8cc46285b7 100644
--- a/docker/keeper/Dockerfile
+++ b/docker/keeper/Dockerfile
@@ -16,7 +16,7 @@ RUN arch=${TARGETARCH:-amd64} \
esac
-FROM alpine
+FROM alpine:3.21
ENV LANG=en_US.UTF-8 \
LANGUAGE=en_US:en \
diff --git a/docker/packager/binary-builder/Dockerfile b/docker/packager/binary-builder/Dockerfile
index 648d8638aa2e..7e2aabeb7d5d 100644
--- a/docker/packager/binary-builder/Dockerfile
+++ b/docker/packager/binary-builder/Dockerfile
@@ -1,11 +1,13 @@
-# docker build -t clickhouse/binary-builder .
+# docker build -t altinityinfra/binary-builder .
ARG FROM_TAG=latest
-FROM clickhouse/fasttest:$FROM_TAG
+FROM altinityinfra/fasttest:$FROM_TAG
+# NOTE(strtgbb) Not sure where LLVM_VERSION is set, so we set it here
+ENV LLVM_VERSION=19
ENV CC=clang-${LLVM_VERSION}
ENV CXX=clang++-${LLVM_VERSION}
# If the cctools is updated, then first build it in the CI, then update here in a different commit
-COPY --from=clickhouse/cctools:d9e3596e706b /cctools /cctools
+COPY --from=altinityinfra/cctools:0de95b3aeb28 /cctools /cctools
# Rust toolchain and libraries
ENV RUSTUP_HOME=/rust/rustup
diff --git a/docker/packager/binary-builder/build.sh b/docker/packager/binary-builder/build.sh
index dc1837b869ec..bb9a5ea20882 100755
--- a/docker/packager/binary-builder/build.sh
+++ b/docker/packager/binary-builder/build.sh
@@ -176,7 +176,8 @@ then
git -C "$PERF_OUTPUT"/ch reset --soft pr
git -C "$PERF_OUTPUT"/ch log -5
# Unlike git log, git show requires trees
- git -C "$PERF_OUTPUT"/ch show -s
+ # NOTE(strtgbb) the show command fails in our ci - fatal: unable to read tree ...
+ # git -C "$PERF_OUTPUT"/ch show -s
(
cd "$PERF_OUTPUT"/..
tar -cv --zstd -f /output/performance.tar.zst output
diff --git a/docker/packager/cctools/Dockerfile b/docker/packager/cctools/Dockerfile
index 570a42d42d51..3555bf7c428b 100644
--- a/docker/packager/cctools/Dockerfile
+++ b/docker/packager/cctools/Dockerfile
@@ -1,10 +1,10 @@
-# docker build -t clickhouse/cctools .
+# docker build -t altinityinfra/cctools .
-# This is a hack to significantly reduce the build time of the clickhouse/binary-builder
+# This is a hack to significantly reduce the build time of the altinityinfra/binary-builder
# It's based on the assumption that we don't care of the cctools version so much
-# It event does not depend on the clickhouse/fasttest in the `docker/images.json`
+# It does not even depend on the altinityinfra/fasttest in the `docker/images.json`
ARG FROM_TAG=latest
-FROM clickhouse/fasttest:$FROM_TAG as builder
+FROM altinityinfra/fasttest:$FROM_TAG as builder
ENV CC=clang-${LLVM_VERSION}
ENV CXX=clang++-${LLVM_VERSION}
diff --git a/docker/packager/packager b/docker/packager/packager
index f720e6492f02..88bd6ae21d06 100755
--- a/docker/packager/packager
+++ b/docker/packager/packager
@@ -6,12 +6,13 @@ import os
import subprocess
import sys
from pathlib import Path
-from typing import List, Optional
+from typing import Dict, List, Optional
SCRIPT_PATH = Path(__file__).absolute()
IMAGE_TYPE = "binary-builder"
-IMAGE_NAME = f"clickhouse/{IMAGE_TYPE}"
-
+IMAGE_NAME = f"altinityinfra/{IMAGE_TYPE}"
+DEFAULT_TMP_PATH = SCRIPT_PATH.parent.absolute() / 'tmp'
+TEMP_PATH = Path(os.getenv("TEMP_PATH", DEFAULT_TMP_PATH))
class BuildException(Exception):
pass
@@ -68,9 +69,22 @@ def run_docker_image_with_env(
ch_root: Path,
cargo_cache_dir: Path,
ccache_dir: Optional[Path],
+ aws_secrets : Optional[Dict[str,str]]
) -> None:
output_dir.mkdir(parents=True, exist_ok=True)
cargo_cache_dir.mkdir(parents=True, exist_ok=True)
+ extra_parts = ""
+
+ if aws_secrets:
+ # Pass AWS credentials via file rather than via env to avoid leaking secrets
+ env_part = {"AWS_CONFIG_FILE": "/home/clickhouse/.aws/credentials"}
+ host_aws_config_file_path = Path(TEMP_PATH) / 'aws_config'
+ with open(host_aws_config_file_path, 'wt') as f:
+ f.write("[default]")
+ for key, value in aws_secrets.items():
+ f.write(f"\n{key}={value}")
+
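+        # Mount the generated credentials file into the container at the path referenced by AWS_CONFIG_FILE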
+ extra_parts = f"--volume={host_aws_config_file_path}:{env_part['AWS_CONFIG_FILE']}"
env_part = " -e ".join(env_variables)
if env_part:
@@ -93,6 +107,7 @@ def run_docker_image_with_env(
cmd = (
f"docker run --network=host --user={user} --rm {ccache_mount} "
f"--volume={output_dir}:/output --volume={ch_root}:/build {env_part} "
+ f" {extra_parts} "
f"--volume={cargo_cache_dir}:/rust/cargo/registry {interactive} {image_name} /build/docker/packager/binary-builder/build.sh"
)
@@ -435,6 +450,14 @@ def parse_args() -> argparse.Namespace:
type=dir_name,
help="a directory with ccache",
)
+ parser.add_argument(
+ "--s3-access-key-id",
+ help="an S3 access key id used for sscache bucket",
+ )
+ parser.add_argument(
+ "--s3-secret-access-key",
+ help="an S3 secret access key used for sscache bucket",
+ )
parser.add_argument(
"--s3-bucket",
help="an S3 bucket used for sscache and clang-tidy-cache",
@@ -541,6 +564,10 @@ def main() -> None:
ch_root,
args.cargo_cache_dir,
args.ccache_dir,
+ {
+ "aws_access_key_id" : args.s3_access_key_id,
+ "aws_secret_access_key" : args.s3_secret_access_key
+ }
)
logging.info("Output placed into %s", args.output_dir)
diff --git a/docker/server/README.md b/docker/server/README.md
index 61e86b8a9b53..68e81956a700 100644
--- a/docker/server/README.md
+++ b/docker/server/README.md
@@ -193,4 +193,4 @@ EOSQL
## License
-View [license information](https://github.com/ClickHouse/ClickHouse/blob/master/LICENSE) for the software contained in this image.
+View [license information](https://github.com/Altinity/ClickHouse/blob/antalya/LICENSE) for the software contained in this image.
diff --git a/docker/server/README.sh b/docker/server/README.sh
index 42fa72404d1f..0441d1e7f633 100755
--- a/docker/server/README.sh
+++ b/docker/server/README.sh
@@ -34,5 +34,5 @@ EOD
# Remove %%LOGO%% from the file with one line below
sed -i '/^%%LOGO%%/,+1d' "$R"
-# Replace each %%IMAGE%% with our `clickhouse/clickhouse-server`
-sed -i '/%%IMAGE%%/s:%%IMAGE%%:clickhouse/clickhouse-server:g' $R
+# Replace each %%IMAGE%% with our `altinity/clickhouse-server`
+sed -i '/%%IMAGE%%/s:%%IMAGE%%:altinity/clickhouse-server:g' $R
diff --git a/docker/server/README.src/github-repo b/docker/server/README.src/github-repo
index 70a009ec9588..721b5d7bc3f8 100644
--- a/docker/server/README.src/github-repo
+++ b/docker/server/README.src/github-repo
@@ -1 +1 @@
-https://github.com/ClickHouse/ClickHouse
+https://github.com/Altinity/ClickHouse/
diff --git a/docker/server/README.src/license.md b/docker/server/README.src/license.md
index 6be024edcdec..fac387a11c5b 100644
--- a/docker/server/README.src/license.md
+++ b/docker/server/README.src/license.md
@@ -1 +1 @@
-View [license information](https://github.com/ClickHouse/ClickHouse/blob/master/LICENSE) for the software contained in this image.
+View [license information](https://github.com/Altinity/ClickHouse/blob/antalya/LICENSE) for the software contained in this image.
diff --git a/docker/server/README.src/logo.svg b/docker/server/README.src/logo.svg
index a50dd81a1645..886f4f0e4ddd 100644
--- a/docker/server/README.src/logo.svg
+++ b/docker/server/README.src/logo.svg
@@ -1,43 +1,17 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
+
+
+
+
+
+
+
+
diff --git a/docker/server/README.src/maintainer.md b/docker/server/README.src/maintainer.md
index 26c7db1a2934..90f15bb5337c 100644
--- a/docker/server/README.src/maintainer.md
+++ b/docker/server/README.src/maintainer.md
@@ -1 +1 @@
-[ClickHouse Inc.](%%GITHUB-REPO%%)
+[Altinity Inc.](%%GITHUB-REPO%%)
diff --git a/docker/test/README.md b/docker/test/README.md
index 563cfd837e95..baca52cd1149 100644
--- a/docker/test/README.md
+++ b/docker/test/README.md
@@ -2,4 +2,4 @@
## License
-View [license information](https://github.com/ClickHouse/ClickHouse/blob/master/LICENSE) for the software contained in this image.
+View [license information](https://github.com/Altinity/ClickHouse/blob/antalya/LICENSE) for the software contained in this image.
diff --git a/docker/test/base/Dockerfile b/docker/test/base/Dockerfile
index 2e9af0a4a2d4..26cd016a5abc 100644
--- a/docker/test/base/Dockerfile
+++ b/docker/test/base/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
-# docker build -t clickhouse/test-base .
+# docker build -t altinityinfra/test-base .
ARG FROM_TAG=latest
-FROM clickhouse/test-util:$FROM_TAG
+FROM altinityinfra/test-util:$FROM_TAG
RUN apt-get update \
&& apt-get install \
diff --git a/docker/test/clickbench/Dockerfile b/docker/test/clickbench/Dockerfile
index 0b6b1736e031..214191a8b488 100644
--- a/docker/test/clickbench/Dockerfile
+++ b/docker/test/clickbench/Dockerfile
@@ -1,5 +1,5 @@
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
ENV TZ=Europe/Amsterdam
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
diff --git a/docker/test/compatibility/centos/Dockerfile b/docker/test/compatibility/centos/Dockerfile
index 628609e374f6..1edb42422b1f 100644
--- a/docker/test/compatibility/centos/Dockerfile
+++ b/docker/test/compatibility/centos/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/test-old-centos .
+# docker build -t altinityinfra/test-old-centos .
FROM centos:5
CMD /bin/sh -c "/clickhouse server --config /config/config.xml > /var/log/clickhouse-server/stderr.log 2>&1 & \
diff --git a/docker/test/compatibility/ubuntu/Dockerfile b/docker/test/compatibility/ubuntu/Dockerfile
index ddd0a76bd446..0eb283ff3daf 100644
--- a/docker/test/compatibility/ubuntu/Dockerfile
+++ b/docker/test/compatibility/ubuntu/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/test-old-ubuntu .
+# docker build -t altinityinfra/test-old-ubuntu .
FROM ubuntu:12.04
CMD /bin/sh -c "/clickhouse server --config /config/config.xml > /var/log/clickhouse-server/stderr.log 2>&1 & \
diff --git a/docker/test/fasttest/Dockerfile b/docker/test/fasttest/Dockerfile
index 264eb7bee326..c0a9c9b30bac 100644
--- a/docker/test/fasttest/Dockerfile
+++ b/docker/test/fasttest/Dockerfile
@@ -1,6 +1,6 @@
-# docker build -t clickhouse/fasttest .
+# docker build -t altinityinfra/fasttest .
ARG FROM_TAG=latest
-FROM clickhouse/test-util:$FROM_TAG
+FROM altinityinfra/test-util:$FROM_TAG
RUN apt-get update \
&& apt-get install \
@@ -66,6 +66,7 @@ RUN mkdir /tmp/ccache \
-DCMAKE_BUILD_TYPE=None \
-DZSTD_FROM_INTERNET=ON \
-DREDIS_STORAGE_BACKEND=OFF \
+ -DCMAKE_POLICY_VERSION_MINIMUM=3.5 \
-Wno-dev \
-B build \
-S . \
diff --git a/docker/test/fuzzer/Dockerfile b/docker/test/fuzzer/Dockerfile
index e1fb09b8ed57..f79fa706e72e 100644
--- a/docker/test/fuzzer/Dockerfile
+++ b/docker/test/fuzzer/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
-# docker build -t clickhouse/fuzzer .
+# docker build -t altinityinfra/fuzzer .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
@@ -41,5 +41,5 @@ CMD set -o pipefail \
&& cd /workspace \
&& timeout -s 9 1h /run-fuzzer.sh 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee main.log
-# docker run --network=host --volume :/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/fuzzer
+# docker run --network=host --volume :/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> altinityinfra/fuzzer
diff --git a/docker/test/integration/base/Dockerfile b/docker/test/integration/base/Dockerfile
index dc4d470a2623..92d4ef9ec9c0 100644
--- a/docker/test/integration/base/Dockerfile
+++ b/docker/test/integration/base/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
-# docker build -t clickhouse/integration-test .
+# docker build -t altinityinfra/integration-test .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
SHELL ["/bin/bash", "-c"]
@@ -73,5 +73,5 @@ maxClientCnxns=80' > /opt/zookeeper/conf/zoo.cfg && \
ENV TZ=Etc/UTC
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
-COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb
+COPY --from=altinityinfra/cctools:0de95b3aeb28 /opt/gdb /opt/gdb
ENV PATH="/opt/gdb/bin:${PATH}"
diff --git a/docker/test/integration/clickhouse_with_unity_catalog/Dockerfile b/docker/test/integration/clickhouse_with_unity_catalog/Dockerfile
index f711d7258a9e..0dcaf2df80b9 100644
--- a/docker/test/integration/clickhouse_with_unity_catalog/Dockerfile
+++ b/docker/test/integration/clickhouse_with_unity_catalog/Dockerfile
@@ -1,6 +1,6 @@
-# docker build -t clickhouse/integration-test-with-unity-catalog .
+# docker build -t altinityinfra/integration-test-with-unity-catalog .
ARG FROM_TAG=latest
-FROM clickhouse/integration-test:$FROM_TAG
+FROM altinityinfra/integration-test:$FROM_TAG
RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get -y install openjdk-17-jdk-headless && update-alternatives --config java && update-alternatives --config javac
diff --git a/docker/test/integration/helper_container/Dockerfile b/docker/test/integration/helper_container/Dockerfile
index 1084d087e53b..81d658705836 100644
--- a/docker/test/integration/helper_container/Dockerfile
+++ b/docker/test/integration/helper_container/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/integration-helper .
+# docker build -t altinityinfra/integration-helper .
# Helper docker container to run iptables without sudo
FROM alpine:3.18
diff --git a/docker/test/integration/kerberos_kdc/Dockerfile b/docker/test/integration/kerberos_kdc/Dockerfile
index a203c33a3313..a7f989bf4a56 100644
--- a/docker/test/integration/kerberos_kdc/Dockerfile
+++ b/docker/test/integration/kerberos_kdc/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/kerberos-kdc .
+# docker build -t altinityinfra/kerberos-kdc .
FROM centos:6
RUN sed -i '/^mirrorlist/s/^/#/;/^#baseurl/{s/#//;s/mirror.centos.org\/centos\/$releasever/vault.centos.org\/6.10/}' /etc/yum.repos.d/*B*
diff --git a/docker/test/integration/mysql_golang_client/Dockerfile b/docker/test/integration/mysql_golang_client/Dockerfile
index 5281f786ae2d..52be68126e47 100644
--- a/docker/test/integration/mysql_golang_client/Dockerfile
+++ b/docker/test/integration/mysql_golang_client/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/mysql-golang-client .
+# docker build -t altinityinfra/mysql-golang-client .
# MySQL golang client docker container
FROM golang:1.17
diff --git a/docker/test/integration/mysql_java_client/Dockerfile b/docker/test/integration/mysql_java_client/Dockerfile
index 38fefac070e7..5826ee77d501 100644
--- a/docker/test/integration/mysql_java_client/Dockerfile
+++ b/docker/test/integration/mysql_java_client/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/mysql-java-client .
+# docker build -t altinityinfra/mysql-java-client .
# MySQL Java client docker container
FROM openjdk:8-jdk-alpine
diff --git a/docker/test/integration/mysql_js_client/Dockerfile b/docker/test/integration/mysql_js_client/Dockerfile
index 4c9df10ace1c..2b821f243234 100644
--- a/docker/test/integration/mysql_js_client/Dockerfile
+++ b/docker/test/integration/mysql_js_client/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/mysql-js-client .
+# docker build -t altinityinfra/mysql-js-client .
# MySQL JavaScript client docker container
FROM node:16.14.2
diff --git a/docker/test/integration/mysql_php_client/Dockerfile b/docker/test/integration/mysql_php_client/Dockerfile
index 0e11ae023e63..b060e93f70a3 100644
--- a/docker/test/integration/mysql_php_client/Dockerfile
+++ b/docker/test/integration/mysql_php_client/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/mysql-php-client .
+# docker build -t altinityinfra/mysql-php-client .
# MySQL PHP client docker container
FROM php:8-cli-alpine
diff --git a/docker/test/integration/postgresql_java_client/Dockerfile b/docker/test/integration/postgresql_java_client/Dockerfile
index c5583085ef37..5a7458cc1d2f 100644
--- a/docker/test/integration/postgresql_java_client/Dockerfile
+++ b/docker/test/integration/postgresql_java_client/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/postgresql-java-client .
+# docker build -t altinityinfra/postgresql-java-client .
# PostgreSQL Java client docker container
FROM ubuntu:18.04
diff --git a/docker/test/integration/resolver/Dockerfile b/docker/test/integration/resolver/Dockerfile
index 423faf835ae1..1f639bb2793d 100644
--- a/docker/test/integration/resolver/Dockerfile
+++ b/docker/test/integration/resolver/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/python-bottle .
+# docker build -t altinityinfra/python-bottle .
# Helper docker container to run python bottle apps
# python cgi module is dropped in 3.13 - pin to 3.12
diff --git a/docker/test/integration/resolver/requirements.txt b/docker/test/integration/resolver/requirements.txt
index fbf852953296..314b112319b3 100644
--- a/docker/test/integration/resolver/requirements.txt
+++ b/docker/test/integration/resolver/requirements.txt
@@ -1,6 +1,6 @@
-bottle==0.12.25
-packaging==24.1
-pip==23.2.1
-pipdeptree==2.23.0
-setuptools==69.0.3
-wheel==0.42.0
+bottle~=0.13
+packaging~=24.1
+pip~=23.2.1
+pipdeptree~=2.23.0
+setuptools~=69.0.3
+wheel~=0.42.0
diff --git a/docker/test/integration/runner/Dockerfile b/docker/test/integration/runner/Dockerfile
index 469f691bebb4..3e13cafa4658 100644
--- a/docker/test/integration/runner/Dockerfile
+++ b/docker/test/integration/runner/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/integration-tests-runner .
+# docker build -t altinityinfra/integration-tests-runner .
FROM ubuntu:22.04
# ARG for quick switch to a given ubuntu mirror
@@ -85,7 +85,7 @@ COPY modprobe.sh /usr/local/bin/modprobe
COPY dockerd-entrypoint.sh /usr/local/bin/
COPY misc/ /misc/
-COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb
+COPY --from=altinityinfra/cctools:0de95b3aeb28 /opt/gdb /opt/gdb
ENV PATH="/opt/gdb/bin:${PATH}"
# Same options as in test/base/Dockerfile
diff --git a/docker/test/integration/runner/dockerd-entrypoint.sh b/docker/test/integration/runner/dockerd-entrypoint.sh
index 63087d9d4c8c..6863ad7dd181 100755
--- a/docker/test/integration/runner/dockerd-entrypoint.sh
+++ b/docker/test/integration/runner/dockerd-entrypoint.sh
@@ -4,12 +4,12 @@ set -e
mkdir -p /etc/docker/
echo '{
"ipv6": true,
- "fixed-cidr-v6": "fd00::/8",
+ "fixed-cidr-v6": "2001:db8:1::/64",
"ip-forward": true,
"log-level": "debug",
"storage-driver": "overlay2",
- "insecure-registries" : ["dockerhub-proxy.dockerhub-proxy-zone:5000"],
- "registry-mirrors" : ["http://dockerhub-proxy.dockerhub-proxy-zone:5000"]
+ "insecure-registries" : ["65.108.242.32:5000"],
+ "registry-mirrors" : ["http://65.108.242.32:5000"]
}' | dd of=/etc/docker/daemon.json 2>/dev/null
if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
diff --git a/docker/test/integration/s3_proxy/Dockerfile b/docker/test/integration/s3_proxy/Dockerfile
index 5858218e4e4c..df8d8f00f216 100644
--- a/docker/test/integration/s3_proxy/Dockerfile
+++ b/docker/test/integration/s3_proxy/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/s3-proxy .
+# docker build -t altinityinfra/s3-proxy .
FROM nginx:alpine
COPY run.sh /run.sh
diff --git a/docker/test/keeper-jepsen/Dockerfile b/docker/test/keeper-jepsen/Dockerfile
index 3c5d0a6ecb42..d3080a526711 100644
--- a/docker/test/keeper-jepsen/Dockerfile
+++ b/docker/test/keeper-jepsen/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
-# docker build -t clickhouse/keeper-jepsen-test .
+# docker build -t altinityinfra/keeper-jepsen-test .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
ENV DEBIAN_FRONTEND=noninteractive
ENV CLOJURE_VERSION=1.10.3.814
diff --git a/docker/test/libfuzzer/Dockerfile b/docker/test/libfuzzer/Dockerfile
index 46e305c90ab4..157078d0f7f4 100644
--- a/docker/test/libfuzzer/Dockerfile
+++ b/docker/test/libfuzzer/Dockerfile
@@ -1,6 +1,6 @@
-# docker build -t clickhouse/libfuzzer .
+# docker build -t altinityinfra/libfuzzer .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
@@ -35,5 +35,5 @@ RUN pip3 install --no-cache-dir -r /requirements.txt
SHELL ["/bin/bash", "-c"]
-# docker run --network=host --volume :/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/libfuzzer
+# docker run --network=host --volume :/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> altinityinfra/libfuzzer
diff --git a/docker/test/performance-comparison/Dockerfile b/docker/test/performance-comparison/Dockerfile
index f71392752826..4e80c729eeca 100644
--- a/docker/test/performance-comparison/Dockerfile
+++ b/docker/test/performance-comparison/Dockerfile
@@ -1,7 +1,7 @@
-# docker build -t clickhouse/performance-comparison .
+# docker build -t altinityinfra/performance-comparison .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
RUN apt-get update \
&& DEBIAN_FRONTEND=noninteractive apt-get install --yes --no-install-recommends \
@@ -41,9 +41,9 @@ RUN pip3 --no-cache-dir install -r requirements.txt
COPY run.sh /
-COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb
+COPY --from=altinityinfra/cctools:0de95b3aeb28 /opt/gdb /opt/gdb
ENV PATH="/opt/gdb/bin:${PATH}"
CMD ["bash", "/run.sh"]
-# docker run --network=host --volume :/workspace --volume=:/output -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/performance-comparison
+# docker run --network=host --volume :/workspace --volume=:/output -e PR_TO_TEST=<> -e SHA_TO_TEST=<> altinityinfra/performance-comparison
diff --git a/docker/test/server-jepsen/Dockerfile b/docker/test/server-jepsen/Dockerfile
index fd70fc457020..5207f31b953f 100644
--- a/docker/test/server-jepsen/Dockerfile
+++ b/docker/test/server-jepsen/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
-# docker build -t clickhouse/server-jepsen-test .
+# docker build -t altinityinfra/server-jepsen-test .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
ENV DEBIAN_FRONTEND=noninteractive
ENV CLOJURE_VERSION=1.10.3.814
diff --git a/docker/test/sqlancer/Dockerfile b/docker/test/sqlancer/Dockerfile
index 9a48bf6b8d3c..980dcfba928c 100644
--- a/docker/test/sqlancer/Dockerfile
+++ b/docker/test/sqlancer/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/sqlancer-test .
+# docker build -t altinityinfra/sqlancer-test .
FROM ubuntu:22.04
# ARG for quick switch to a given ubuntu mirror
diff --git a/docker/test/sqllogic/Dockerfile b/docker/test/sqllogic/Dockerfile
index 0d21a2da44ee..767e5eecfa84 100644
--- a/docker/test/sqllogic/Dockerfile
+++ b/docker/test/sqllogic/Dockerfile
@@ -1,6 +1,6 @@
-# docker build -t clickhouse/sqllogic-test .
+# docker build -t altinityinfra/sqllogic-test .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
RUN apt-get update --yes \
&& env DEBIAN_FRONTEND=noninteractive \
diff --git a/docker/test/sqltest/Dockerfile b/docker/test/sqltest/Dockerfile
index b805bb03c2b0..e21cb2d7febb 100644
--- a/docker/test/sqltest/Dockerfile
+++ b/docker/test/sqltest/Dockerfile
@@ -1,6 +1,6 @@
-# docker build -t clickhouse/sqltest .
+# docker build -t altinityinfra/sqltest .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
RUN apt-get update --yes \
&& env DEBIAN_FRONTEND=noninteractive \
diff --git a/docker/test/stateful/Dockerfile b/docker/test/stateful/Dockerfile
index 9aa936cb069e..a3e2163b2731 100644
--- a/docker/test/stateful/Dockerfile
+++ b/docker/test/stateful/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #47031
-# docker build -t clickhouse/stateful-test .
+# docker build -t altinityinfra/stateful-test .
ARG FROM_TAG=latest
-FROM clickhouse/stateless-test:$FROM_TAG
+FROM altinityinfra/stateless-test:$FROM_TAG
RUN apt-get update -y \
&& env DEBIAN_FRONTEND=noninteractive \
diff --git a/docker/test/stateless/Dockerfile b/docker/test/stateless/Dockerfile
index 8d9d683bbb92..2d99b850dba5 100644
--- a/docker/test/stateless/Dockerfile
+++ b/docker/test/stateless/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
-# docker build -t clickhouse/stateless-test .
+# docker build -t altinityinfra/stateless-test .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.6.20200320/clickhouse-odbc-1.1.6-Linux.tar.gz"
diff --git a/docker/test/stress/Dockerfile b/docker/test/stress/Dockerfile
index ecb98a4e3eda..4a1979a1c253 100644
--- a/docker/test/stress/Dockerfile
+++ b/docker/test/stress/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
-# docker build -t clickhouse/stress-test .
+# docker build -t altinityinfra/stress-test .
ARG FROM_TAG=latest
-FROM clickhouse/stateful-test:$FROM_TAG
+FROM altinityinfra/stateful-test:$FROM_TAG
RUN apt-get update -y \
&& env DEBIAN_FRONTEND=noninteractive \
diff --git a/docker/test/stress/README.md b/docker/test/stress/README.md
index fe73555fbd23..3d0fa2c9f467 100644
--- a/docker/test/stress/README.md
+++ b/docker/test/stress/README.md
@@ -6,7 +6,7 @@ Usage:
```
$ ls $HOME/someclickhouse
clickhouse-client_18.14.9_all.deb clickhouse-common-static_18.14.9_amd64.deb clickhouse-server_18.14.9_all.deb
-$ docker run --volume=$HOME/someclickhouse:/package_folder --volume=$HOME/test_output:/test_output clickhouse/stress-test
+$ docker run --volume=$HOME/someclickhouse:/package_folder --volume=$HOME/test_output:/test_output altinityinfra/stress-test
Selecting previously unselected package clickhouse-common-static.
(Reading database ... 14442 files and directories currently installed.)
...
diff --git a/docker/test/unit/Dockerfile b/docker/test/unit/Dockerfile
index 9f4b86aa0ca7..adc72011bd1d 100644
--- a/docker/test/unit/Dockerfile
+++ b/docker/test/unit/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
-# docker build -t clickhouse/unit-test .
+# docker build -t altinityinfra/unit-test .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
COPY run.sh /
RUN chmod +x run.sh
diff --git a/docker/test/upgrade/Dockerfile b/docker/test/upgrade/Dockerfile
new file mode 100644
index 000000000000..c66868c2a046
--- /dev/null
+++ b/docker/test/upgrade/Dockerfile
@@ -0,0 +1,29 @@
+# rebuild in #33610
+# docker build -t altinityinfra/upgrade-check .
+ARG FROM_TAG=latest
+FROM altinityinfra/stateful-test:$FROM_TAG
+
+RUN apt-get update -y \
+ && env DEBIAN_FRONTEND=noninteractive \
+ apt-get install --yes --no-install-recommends \
+ bash \
+ tzdata \
+ parallel \
+ expect \
+ python3 \
+ python3-lxml \
+ python3-termcolor \
+ python3-requests \
+ curl \
+ sudo \
+ openssl \
+ netcat-openbsd \
+ brotli \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
+
+COPY run.sh /
+
+ENV EXPORT_S3_STORAGE_POLICIES=1
+
+CMD ["/bin/bash", "/run.sh"]
diff --git a/docker/test/util/Dockerfile b/docker/test/util/Dockerfile
index edc133e592fe..3fd757a06234 100644
--- a/docker/test/util/Dockerfile
+++ b/docker/test/util/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/test-util .
+# docker build -t altinityinfra/test-util .
FROM ubuntu:22.04
# ARG for quick switch to a given ubuntu mirror
@@ -56,5 +56,5 @@ RUN apt-get update \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
-COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb
+COPY --from=altinityinfra/cctools:0de95b3aeb28 /opt/gdb /opt/gdb
ENV PATH="/opt/gdb/bin:${PATH}"
diff --git a/src/Common/SignalHandlers.cpp b/src/Common/SignalHandlers.cpp
index eab3a6773aae..f50f72aceb33 100644
--- a/src/Common/SignalHandlers.cpp
+++ b/src/Common/SignalHandlers.cpp
@@ -577,14 +577,14 @@ try
}
/// Advice the user to send it manually.
- if (std::string_view(VERSION_OFFICIAL).contains("official build"))
+ if (std::string_view(VERSION_OFFICIAL).contains("altinity build"))
{
const auto & date_lut = DateLUT::instance();
/// Approximate support period, upper bound.
if (time(nullptr) - date_lut.makeDate(2000 + VERSION_MAJOR, VERSION_MINOR, 1) < (365 + 30) * 86400)
{
- LOG_FATAL(log, "Report this error to https://github.com/ClickHouse/ClickHouse/issues");
+ LOG_FATAL(log, "Report this error to https://github.com/Altinity/ClickHouse/issues");
}
else
{
diff --git a/tests/broken_tests.json b/tests/broken_tests.json
new file mode 100644
index 000000000000..f885334ff2f0
--- /dev/null
+++ b/tests/broken_tests.json
@@ -0,0 +1,281 @@
+{
+ "02700_s3_part_INT_MAX": {
+ "reason": "NEEDSFIX - Fails on asan"
+ },
+ "00157_cache_dictionary": {
+ "reason": "INVESTIGATE - fails with asan,debug,msan"
+ },
+ "02815_no_throw_in_simple_queries": {
+ "reason": "INVESTIGATE - Fails on asan,msan,tsan,debug,Aarch64"
+ },
+ "03206_no_exceptions_clickhouse_local": {
+ "reason": "INVESTIGATE - Fails on asan,msan,tsan,debug,Aarch64"
+ },
+ "03203_hive_style_partitioning": {
+ "reason": "INVESTIGATE - new fail in 24.12"
+ },
+ "00988_expansion_aliases_limit": {
+ "reason": "INVESTIGATE - Timeout on TSAN"
+ },
+ "03223_analyzer_with_cube_fuzz": {
+ "reason": "INVESTIGATE - Fails on TSAN"
+ },
+ "test_overcommit_tracker/test.py::test_user_overcommit": {
+ "reason": "INVESTIGATE - fails in tsan"
+ },
+ "01037_polygon_dicts_correctness_fast": {
+ "reason": "INVESTIGATE - fails in tsan"
+ },
+ "03094_grouparraysorted_memory": {
+ "reason": "INVESTIGATE - fails in tsan"
+ },
+ "test_attach_partition_using_copy/test.py::test_all_replicated": {
+ "reason": "NEEDSFIX - Web disk is not working"
+ },
+ "test_attach_partition_using_copy/test.py::test_both_mergetree": {
+ "reason": "NEEDSFIX - Web disk is not working"
+ },
+ "test_attach_partition_using_copy/test.py::test_not_work_on_different_disk": {
+ "reason": "NEEDSFIX - Web disk is not working"
+ },
+ "test_attach_partition_using_copy/test.py::test_only_destination_replicated": {
+ "reason": "NEEDSFIX - Web disk is not working"
+ },
+ "test_backup_restore_on_cluster/test_cancel_backup.py::test_shutdown_cancels_backup": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_backup_restore_on_cluster/test_different_versions.py::test_different_versions": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_grpc_protocol/test.py::test_ipv6_select_one": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN - FIXED in 24.8"
+ },
+ "test_settings_randomization/test.py::test_settings_randomization": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release"
+ },
+ "02766_prql": {
+ "reason": "KNOWN - PRQL is disabled in Antalya branch"
+ },
+ "02833_local_with_dialect": {
+ "reason": "KNOWN - PRQL is disabled in Antalya branch"
+ },
+ "03003_prql_panic": {
+ "reason": "KNOWN - PRQL is disabled in Antalya branch"
+ },
+ "01275_parallel_mv": {
+ "reason": "INVESTIGATE - Timeout on MSAN"
+ },
+ "test_git_import/test.py::test_git_import": {
+ "reason": "NEEDSFIX - 403 while getting data from GitHub"
+ },
+ "test_storage_delta/test.py::test_types[1]": {
+ "reason": "NEEDSFIX - rust problem"
+ },
+ "test_storage_delta/test.py::test_types[0]": {
+ "reason": "NEEDSFIX - rust problem"
+ },
+ "test_storage_delta/test.py::test_single_log_file[1]": {
+ "reason": "NEEDSFIX - rust problem"
+ },
+ "test_storage_delta/test.py::test_single_log_file[0]": {
+ "reason": "NEEDSFIX - rust problem"
+ },
+ "test_storage_delta/test.py::test_restart_broken[1]": {
+ "reason": "NEEDSFIX - rust problem"
+ },
+ "test_storage_delta/test.py::test_restart_broken[0]": {
+ "reason": "NEEDSFIX - rust problem"
+ },
+ "test_storage_delta/test.py::test_restart_broken_table_function[1]": {
+ "reason": "NEEDSFIX - rust problem"
+ },
+ "test_storage_delta/test.py::test_restart_broken_table_function[0]": {
+ "reason": "NEEDSFIX - rust problem"
+ },
+ "test_storage_delta/test.py::test_replicated_database_and_unavailable_s3[1]": {
+ "reason": "NEEDSFIX - rust problem"
+ },
+ "test_storage_delta/test.py::test_replicated_database_and_unavailable_s3[0]": {
+ "reason": "NEEDSFIX - rust problem"
+ },
+ "test_storage_delta/test.py::test_partition_columns[1]": {
+ "reason": "NEEDSFIX - rust problem"
+ },
+ "test_storage_delta/test.py::test_partition_columns[0]": {
+ "reason": "NEEDSFIX - rust problem"
+ },
+ "test_storage_delta/test.py::test_partition_columns_2[1]": {
+ "reason": "NEEDSFIX - rust problem"
+ },
+ "test_storage_delta/test.py::test_partition_by[1]": {
+ "reason": "NEEDSFIX - rust problem"
+ },
+ "test_storage_delta/test.py::test_partition_by[0]": {
+ "reason": "NEEDSFIX - rust problem"
+ },
+ "test_storage_delta/test.py::test_multiple_log_files[1]": {
+ "reason": "NEEDSFIX - rust problem"
+ },
+ "test_storage_delta/test.py::test_multiple_log_files[0]": {
+ "reason": "NEEDSFIX - rust problem"
+ },
+ "test_storage_delta/test.py::test_metadata[1]": {
+ "reason": "NEEDSFIX - rust problem"
+ },
+ "test_storage_delta/test.py::test_metadata[0]": {
+ "reason": "NEEDSFIX - rust problem"
+ },
+ "test_storage_delta/test.py::test_filesystem_cache[1-s3]": {
+ "reason": "NEEDSFIX - rust problem"
+ },
+ "test_storage_delta/test.py::test_filesystem_cache[0-s3]": {
+ "reason": "NEEDSFIX - rust problem"
+ },
+ "test_storage_delta/test.py::test_complex_types[1]": {
+ "reason": "NEEDSFIX - rust problem"
+ },
+ "test_storage_delta/test.py::test_complex_types[0]": {
+ "reason": "NEEDSFIX - rust problem"
+ },
+ "test_storage_delta/test.py::test_checkpoint[1]": {
+ "reason": "NEEDSFIX - rust problem"
+ },
+ "test_storage_delta/test.py::test_checkpoint[0]": {
+ "reason": "NEEDSFIX - rust problem"
+ },
+ "test_cow_policy/test.py::test_cow_policy[cow_policy_multi_disk]": {
+ "reason": "NEEDSFIX - Timeout"
+ },
+ "test_cow_policy/test.py::test_cow_policy[cow_policy_multi_volume]": {
+ "reason": "NEEDSFIX - Timeout"
+ },
+ "test_s3_assume_role/test.py::test_using_assumed_creds": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_asynchronous_metric_jemalloc_profile_active/test.py::test_asynchronous_metric_jemalloc_profile_active": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_backup_restore_new/test.py::test_incremental_backup_overflow": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_mask_sensitive_info/test.py::test_create_table": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_mask_sensitive_info/test.py::test_table_functions": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_odbc_interaction/test.py::test_postgres_odbc_hashed_dictionary_no_tty_pipe_overflow": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_storage_iceberg/test.py::test_types[s3-1]": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_storage_iceberg/test.py::test_types[azure-2]": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_storage_iceberg/test.py::test_types[local-1]": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_storage_iceberg/test.py::test_types[local-2]": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_storage_iceberg/test.py::test_types[s3-2]": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_storage_iceberg/test.py::test_types[azure-1]": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_library_bridge/test_exiled.py::test_bridge_dies_with_parent": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_merge_tree_check_part_with_cache/test.py::test_check_part_with_cache": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_odbc_interaction/test.py::test_concurrent_queries": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_odbc_interaction/test.py::test_many_connections": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_odbc_interaction/test.py::test_mysql_insert": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_odbc_interaction/test.py::test_mysql_odbc_select_nullable": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_odbc_interaction/test.py::test_mysql_simple_select_works": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_odbc_interaction/test.py::test_no_connection_pooling": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_odbc_interaction/test.py::test_odbc_cyrillic_with_varchar": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_odbc_interaction/test.py::test_odbc_long_column_names": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_odbc_interaction/test.py::test_odbc_long_text": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_odbc_interaction/test.py::test_odbc_postgres_conversions": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_odbc_interaction/test.py::test_odbc_postgres_date_data_type": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_odbc_interaction/test.py::test_postgres_insert": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_async_metrics_in_cgroup/test.py::test_normalized_user_cpu": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_async_metrics_in_cgroup/test.py::test_system_wide_metrics": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_async_metrics_in_cgroup/test.py::test_user_cpu_accounting": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_jemalloc_percpu_arena/test.py::test_jemalloc_percpu_arena": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "02354_vector_search_expansion_search": {
+ "reason": "INVESTIGATE - Timeout on TSAN"
+ },
+ "test_hedged_requests_parallel/test.py::test_combination1": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_hedged_requests_parallel/test.py::test_combination2": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_hedged_requests_parallel/test.py::test_query_with_no_data_to_sample": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_hedged_requests_parallel/test.py::test_send_data": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_hedged_requests_parallel/test.py::test_send_table_status_sleep": {
+ "reason": "INVESTIGATE - Fails on ASAN, ASAN old analyzer, Release, TSAN"
+ },
+ "test_s3_cache_locality/test.py::test_cache_locality": {
+ "reason": "INVESTIGATE - Timeout on TSAN"
+ },
+ "test_reload_clusters_config/test.py::test_update_one_cluster": {
+ "reason": "INVESTIGATE - Fails on ASAN old analyzer"
+ },
+ "test_refreshable_mv/test.py::test_replicated_db_startup_race": {
+ "reason": "NEEDSFIX - Bad sed syntax"
+ },
+ "test_database_delta/test.py::test_complex_table_schema": {
+ "reason": "Investigate - Timeout"
+ },
+ "test_database_delta/test.py::test_multiple_schemes_tables": {
+ "reason": "Investigate - Cluster fails to start"
+ },
+ "test_database_delta/test.py::test_embedded_database_and_tables": {
+ "reason": "Investigate - Cluster fails to start"
+ },
+ "test_storage_rabbitmq/test.py::test_rabbitmq_sharding_between_queues_publish": {
+ "reason": "INVESTIGATE - Timeout on TSAN"
+ }
+}
diff --git a/tests/ci/build_check.py b/tests/ci/build_check.py
index 160e4b6a62d8..83689c915452 100644
--- a/tests/ci/build_check.py
+++ b/tests/ci/build_check.py
@@ -12,20 +12,22 @@
import docker_images_helper
from ci_config import CI
from ci_utils import Shell
-from env_helper import REPO_COPY, S3_BUILDS_BUCKET
+from env_helper import REPO_COPY, S3_BUILDS_BUCKET, TEMP_PATH, S3_ACCESS_KEY_ID, S3_SECRET_ACCESS_KEY
from git_helper import Git, checkout_submodules, unshallow
-from pr_info import PRInfo
+from pr_info import PRInfo, EventType
from report import FAILURE, SUCCESS, JobReport, StatusType
from s3_helper import S3Helper
from stopwatch import Stopwatch
from tee_popen import TeePopen
from version_helper import (
ClickHouseVersion,
+ VersionType,
get_version_from_repo,
update_version_local,
+ get_version_from_tag,
)
-IMAGE_NAME = "clickhouse/binary-builder"
+IMAGE_NAME = "altinityinfra/binary-builder"
BUILD_LOG_NAME = "build_log.log"
@@ -77,6 +79,8 @@ def get_packager_cmd(
cmd += " --cache=sccache"
cmd += " --s3-rw-access"
cmd += f" --s3-bucket={S3_BUILDS_BUCKET}"
+ cmd += f" --s3-access-key-id={S3_ACCESS_KEY_ID}"
+ cmd += f" --s3-secret-access-key={S3_SECRET_ACCESS_KEY}"
if build_config.additional_pkgs:
cmd += " --additional-pkgs"
@@ -185,16 +189,36 @@ def main():
version = get_version_from_repo(git=Git(True))
logging.info("Got version from repo %s", version.string)
- official_flag = pr_info.number == 0
+ # official_flag = pr_info.number == 0
- version_type = "testing"
- if is_release_pr(pr_info):
- version_type = "stable"
- official_flag = True
+ # version_type = "testing"
+ # if is_release_pr(pr_info):
+ # version_type = "stable"
+ # official_flag = True
+
+ # NOTE(vnemkov): For Altinity builds, version flavor is taken from autogenerated_versions.txt
+ official_flag = True
+
+ if pr_info.event_type == EventType.PUSH \
+ and pr_info.ref.startswith('refs/tags/'):
+ tag_name = pr_info.ref.removeprefix('refs/tags/')
+
+ version_from_tag = get_version_from_tag(tag_name)
+
+ # tag can override only `tweak` and `flavour`
+ assert version_from_tag.major == version.major \
+ and version_from_tag.minor == version.minor \
+ and version_from_tag.patch == version.patch
+
+ version._flavour = version_from_tag._flavour
+ version.tweak = version_from_tag.tweak
- update_version_local(version, version_type)
+ logging.info("Updated version info from tag: %s => %s", tag_name, version)
- logging.info("Updated local files with version")
+ # TODO(vnemkov): make sure tweak part is incremented by 1 each time we merge a PR
+ update_version_local(version, version._flavour)
+
+ logging.info("Updated local files with version %s", version)
logging.info("Build short name %s", build_name)
@@ -228,6 +252,28 @@ def main():
f"sudo chown -R ubuntu:ubuntu {build_output_path}", shell=True
)
logging.info("Build finished as %s, log path %s", build_status, log_path)
+
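+    # Publish the source tarball produced by the packager (if any) under the same S3 prefix as the other build artifacts.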
+ s3_helper = S3Helper()
+ s3_path_prefix = "/".join(
+ (
+ get_release_or_pr(pr_info, get_version_from_repo())[0],
+ pr_info.sha,
+ build_name,
+ )
+ )
+ src_path = Path(TEMP_PATH) / "build_source.src.tar.gz"
+ s3_path = s3_path_prefix + "/clickhouse-" + version.string + ".src.tar.gz"
+ logging.info("s3_path %s", s3_path)
+ if src_path.exists():
+ src_url = s3_helper.upload_build_file_to_s3(
+ src_path, s3_path
+ )
+ logging.info("Source tar %s", src_url)
+ print(f"::notice ::Source tar URL: {src_url}")
+ else:
+ logging.info("Source tar doesn't exist")
+ print("Source tar doesn't exist")
+
if build_status != SUCCESS:
# We check if docker works, because if it's down, it's infrastructure
try:
diff --git a/tests/ci/build_report_check.py b/tests/ci/build_report_check.py
index de14d01de9e9..9246be708a7c 100644
--- a/tests/ci/build_report_check.py
+++ b/tests/ci/build_report_check.py
@@ -98,9 +98,9 @@ def main():
missing_builds += 1
build_results.insert(0, build_result)
else:
- assert (
- pr_info.head_ref == build_result.head_ref or pr_info.number > 0
- ), "BUG. if not a PR, report must be created on the same branch"
+ # if pr_info.head_ref == build_result.head_ref or pr_info.number > 0:
+ logging.error("BUG. if not a PR, report must be created on the same branch, build_name: %s, \npr_info: %s,\nbuild_result: %s",
+ build_name, pr_info, build_result)
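+            # Downgraded from an assert: log the mismatch and keep the build result instead of failing the report job.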
build_results.append(build_result)
# Calculate artifact groups like packages and binaries
diff --git a/tests/ci/changelog.py b/tests/ci/changelog.py
index efe16a57ca4d..4ef57eb004a6 100755
--- a/tests/ci/changelog.py
+++ b/tests/ci/changelog.py
@@ -25,6 +25,7 @@
get_abs_path,
get_version_from_repo,
get_version_from_tag,
+ get_version_from_string,
)
# This array gives the preferred category order, and is also used to
@@ -52,13 +53,14 @@
class Description:
def __init__(
- self, number: int, user: NamedUser, html_url: str, entry: str, category: str
+        self, number: int, user: NamedUser, html_url: str, entry: str, category: str, backport_pr: Optional[int] = None
):
self.number = number
self.html_url = html_url
self.user = gh.get_user_cached(user._rawData["login"]) # type: ignore
self.entry = entry
self.category = category
+ self.backport_pr = backport_pr
@property
def formatted_entry(self) -> str:
@@ -76,20 +78,21 @@ def formatted_entry(self) -> str:
r"\1[#\3](\2)",
entry,
)
- # It's possible that we face a secondary rate limit.
- # In this case we should sleep until we get it
- while True:
- try:
- user_name = self.user.name if self.user.name else self.user.login
- break
- except UnknownObjectException:
- user_name = self.user.login
- break
- except RateLimitExceededException:
- gh.sleep_on_rate_limit()
+ # # It's possible that we face a secondary rate limit.
+ # # In this case we should sleep until we get it
+ # while True:
+ # try:
+ # user_name = self.user.name if self.user.name else self.user.login
+ # break
+ # except UnknownObjectException:
+ # user_name = self.user.login
+ # break
+ # except RateLimitExceededException:
+ # gh.sleep_on_rate_limit()
+
+ backport_clause = '' if self.backport_pr is None else f' via {self.backport_pr}'
return (
- f"* {entry} [#{self.number}]({self.html_url}) "
- f"([{user_name}]({self.user.html_url}))."
+ f"* {entry} (#{self.number} by @{self.user.login}{backport_clause})"
)
# Sort PR descriptions by numbers
@@ -159,7 +162,7 @@ def parse_args() -> argparse.Namespace:
)
parser.add_argument(
"--repo",
- default="ClickHouse/ClickHouse",
+ default="Altinity/ClickHouse",
help="a repository to query for pull-requests from GitHub",
)
parser.add_argument(
@@ -200,13 +203,14 @@ def parse_args() -> argparse.Namespace:
# Returns None if the PR should not be mentioned in changelog.
def generate_description(item: PullRequest, repo: Repository) -> Optional[Description]:
backport_number = item.number
- if item.head.ref.startswith("backport/"):
+ # NOTE(vnemkov): intentionally without trailing slash, so it will match upstream's 'backport/' and our 'backports/' branch names.
+ if item.head.ref.startswith("backport"):
branch_parts = item.head.ref.split("/")
if len(branch_parts) == 3:
try:
item = gh.get_pull_cached(repo, int(branch_parts[-1]))
except Exception as e:
- logging.warning("unable to get backported PR, exception: %s", e)
+ logging.warning(f"unable to get backported PR for %s, exception %s", item, e)
else:
logging.warning(
"The branch %s doesn't match backport template, using PR %s as is",
@@ -296,8 +300,9 @@ def generate_description(item: PullRequest, repo: Repository) -> Optional[Descri
):
category = "Bug Fix (user-visible misbehavior in an official stable release)"
+ backport_pr = None
if backport_number != item.number:
- entry = f"Backported in #{backport_number}: {entry}"
+ backport_pr = backport_number
if not entry:
# Shouldn't happen, because description check in CI should catch such PRs.
@@ -313,7 +318,7 @@ def generate_description(item: PullRequest, repo: Repository) -> Optional[Descri
category = c
break
- return Description(item.number, item.user, item.html_url, entry, category)
+ return Description(item.number, item.user, item.html_url, entry, category, backport_pr=backport_pr)
def write_changelog(
@@ -399,6 +404,19 @@ def get_year(prs: PullRequests) -> int:
def get_branch_and_patch_by_tag(tag: str) -> Tuple[Optional[str], Optional[int]]:
+    try:
+        try:
+            patch = get_version_from_string(tag.removeprefix('v')).patch
+            branch = runner.run(f"git branch --contains {tag} | head -n1", stderr=DEVNULL).strip()
+        except Exception:
+            # most likely the `tag` is not a tag, but a commit
+            patch = 999999999999
+            branch = runner.run(f"git branch --contains {tag} | head -n1", stderr=DEVNULL).strip().removeprefix('*').strip()
+
+        return branch, patch
+    except Exception:
+        logging.exception(f"Failed to get branch from a tag {tag} using git commands", exc_info=True)
+
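+    # If resolving the branch via git fails, fall through to the original tag-string parsing below.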
tag = tag.removeprefix("v")
versions = tag.split(".")
if len(versions) < 4:
diff --git a/tests/ci/ci.py b/tests/ci/ci.py
old mode 100644
new mode 100755
index a7acf987cacd..635458e5c122
--- a/tests/ci/ci.py
+++ b/tests/ci/ci.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
import argparse
import concurrent.futures
import json
@@ -61,7 +63,11 @@
from s3_helper import S3Helper
from stopwatch import Stopwatch
from tee_popen import TeePopen
-from version_helper import get_version_from_repo
+from version_helper import (
+ get_version_from_repo,
+ get_version_from_string,
+ update_cmake_version,
+)
# pylint: disable=too-many-lines,too-many-branches
@@ -297,6 +303,8 @@ def _pre_action(s3, job_name, batch, indata, pr_info):
# testing), otherwise reports won't be found
if not (pr_info.is_scheduled or pr_info.is_dispatched):
report_prefix = Utils.normalize_string(pr_info.head_ref)
+ elif pr_info.is_pr:
+ report_prefix = str(pr_info.number)
print(
f"Use report prefix [{report_prefix}], pr_num [{pr_info.number}], head_ref [{pr_info.head_ref}]"
)
@@ -388,7 +396,7 @@ def _pre_action(s3, job_name, batch, indata, pr_info):
_get_ext_check_name(job_name),
)
ClickHouseHelper().insert_events_into(
- db="default", table="checks", events=prepared_events
+ db="gh-data", table="checks", events=prepared_events
)
print(f"Pre action done. Report files [{reports_files}] have been downloaded")
@@ -961,7 +969,7 @@ def _add_build_to_version_history(
print(f"::notice ::Log Adding record to versions history: {data}")
- ch_helper.insert_event_into(db="default", table="version_history", event=data)
+ ch_helper.insert_event_into(db="gh-data", table="version_history", event=data)
def _run_test(job_name: str, run_command: str) -> int:
@@ -1111,8 +1119,12 @@ def main() -> int:
git_ref = git_runner.run(f"{GIT_PREFIX} rev-parse HEAD")
# let's get CH version
- version = get_version_from_repo(git=Git(True)).string
- print(f"Got CH version for this commit: [{version}]")
+ git = Git(True)
+    version = get_version_from_repo(git=git).string
+ print(f"""Got CH version for this commit: [{version}]
+ latest tag: {git.latest_tag} ({git.commits_since_latest} commits back),
+ latest upstream tag: {git.latest_upstream_tag} ({git.commits_since_upstream} commits back)
+ """)
docker_data = (
_configure_docker_jobs(args.docker_digest_or_latest)
@@ -1191,6 +1203,31 @@ def main() -> int:
print(
f"Check if rerun for name: [{check_name}], extended name [{check_name_with_group}]"
)
+        # NOTE (vnemkov) Job might not have checked out git tags, so it can't properly compute the version number.
+        # BUT if there is a pre-computed version from `RunConfig`, then we can reuse it.
+ pre_configured_version = indata.get('version', None)
+ git = Git(True)
+ if pre_configured_version is not None and git.commits_since_latest == 0:
+ print(f"Updating version in repo files from '{get_version_from_repo()}' to '{pre_configured_version}'")
+
+ pre_configured_version = get_version_from_string(pre_configured_version, git)
+ # need to set description, otherwise subsequent call (perhaps from other script) to get_version_from_repo() fails
+ pre_configured_version.with_description(pre_configured_version.flavour)
+
+ update_cmake_version(pre_configured_version)
if job_report.job_skipped and not args.force:
print(
@@ -1308,7 +1345,7 @@ def main() -> int:
job_report.check_name or _get_ext_check_name(args.job_name),
)
ch_helper.insert_events_into(
- db="default", table="checks", events=prepared_events
+ db="gh-data", table="checks", events=prepared_events
)
elif job_report.job_skipped:
@@ -1374,7 +1411,7 @@ def main() -> int:
_get_ext_check_name(args.job_name),
)
ClickHouseHelper().insert_events_into(
- db="default", table="checks", events=prepared_events
+ db="gh-data", table="checks", events=prepared_events
)
### POST action: end
diff --git a/tests/ci/ci_buddy.py b/tests/ci/ci_buddy.py
index 07b748180cd3..33dec4938692 100644
--- a/tests/ci/ci_buddy.py
+++ b/tests/ci/ci_buddy.py
@@ -97,7 +97,9 @@ def _get_webhooks():
return json_string
def post(self, message: str, channels: List[str]) -> None:
- print(f"Posting slack message, dry_run [{self.dry_run}]")
+ print(f"Would've posted slack message, dry_run [{self.dry_run}], message: {message}")
+ # NOTE(vnemkov): we don't use slack for CI/CD no need to post messages
+ return
if self.dry_run:
urls = [self.channels[Channels.DRY_RUN]]
else:
diff --git a/tests/ci/ci_cache.py b/tests/ci/ci_cache.py
index 1fd4726e2ee5..83cc92a11058 100644
--- a/tests/ci/ci_cache.py
+++ b/tests/ci/ci_cache.py
@@ -41,7 +41,7 @@ class CiCache:
release - for jobs being executed on the release branch including master branch (not a PR branch)
"""
- _REQUIRED_DIGESTS = [CI.JobNames.DOCS_CHECK, CI.BuildNames.PACKAGE_RELEASE]
+ _REQUIRED_DIGESTS = [CI.BuildNames.PACKAGE_RELEASE]
_S3_CACHE_PREFIX = "CI_cache_v1"
_CACHE_BUILD_REPORT_PREFIX = "build_report"
_RECORD_FILE_EXTENSION = ".ci"
@@ -979,7 +979,6 @@ def apply(
"ClickBench (aarch64)": "45c07c4aa6",
"Docker server image": "6a24d5b187",
"Docker keeper image": "6a24d5b187",
- "Docs check": "4764154c62",
"Fast test": "cb269133f2",
"Style check": "ffffffffff",
"Stress test (msan)": "aa298abf10",
diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py
index 88a35cfa0bd7..bbb14dfb3317 100644
--- a/tests/ci/ci_config.py
+++ b/tests/ci/ci_config.py
@@ -287,18 +287,19 @@ class CI:
runner_type=Runners.STYLE_CHECKER_AARCH64,
),
JobNames.STATELESS_TEST_ASAN: CommonJobConfigs.STATELESS_TEST.with_properties(
- required_builds=[BuildNames.PACKAGE_ASAN], num_batches=2
+ required_builds=[BuildNames.PACKAGE_ASAN], num_batches=2, timeout=9000
),
JobNames.STATELESS_TEST_AARCH64_ASAN: CommonJobConfigs.STATELESS_TEST.with_properties(
required_builds=[BuildNames.PACKAGE_AARCH64_ASAN],
num_batches=2,
runner_type=Runners.FUNC_TESTER_AARCH64,
+ timeout=9000,
),
JobNames.STATELESS_TEST_TSAN: CommonJobConfigs.STATELESS_TEST.with_properties(
- required_builds=[BuildNames.PACKAGE_TSAN], num_batches=4
+ required_builds=[BuildNames.PACKAGE_TSAN], num_batches=6, timeout=9000
),
JobNames.STATELESS_TEST_MSAN: CommonJobConfigs.STATELESS_TEST.with_properties(
- required_builds=[BuildNames.PACKAGE_MSAN], num_batches=4
+ required_builds=[BuildNames.PACKAGE_MSAN], num_batches=6, timeout=9000
),
JobNames.STATELESS_TEST_UBSAN: CommonJobConfigs.STATELESS_TEST.with_properties(
required_builds=[BuildNames.PACKAGE_UBSAN], num_batches=2
@@ -340,15 +341,12 @@ class CI:
),
JobNames.STRESS_TEST_ASAN: CommonJobConfigs.STRESS_TEST.with_properties(
required_builds=[BuildNames.PACKAGE_ASAN],
- random_bucket="stress_with_sanitizer",
),
JobNames.STRESS_TEST_UBSAN: CommonJobConfigs.STRESS_TEST.with_properties(
required_builds=[BuildNames.PACKAGE_UBSAN],
- random_bucket="stress_with_sanitizer",
),
JobNames.STRESS_TEST_MSAN: CommonJobConfigs.STRESS_TEST.with_properties(
required_builds=[BuildNames.PACKAGE_MSAN],
- random_bucket="stress_with_sanitizer",
),
JobNames.STRESS_TEST_AZURE_TSAN: CommonJobConfigs.STRESS_TEST.with_properties(
required_builds=[BuildNames.PACKAGE_TSAN], release_only=True
@@ -376,28 +374,25 @@ class CI:
),
JobNames.INTEGRATION_TEST_ASAN: CommonJobConfigs.INTEGRATION_TEST.with_properties(
required_builds=[BuildNames.PACKAGE_ASAN],
- release_only=True,
- num_batches=4,
- timeout=10800,
+ num_batches=8,
),
JobNames.INTEGRATION_TEST_ASAN_OLD_ANALYZER: CommonJobConfigs.INTEGRATION_TEST.with_properties(
required_builds=[BuildNames.PACKAGE_ASAN],
- num_batches=6,
+ num_batches=8,
),
JobNames.INTEGRATION_TEST_TSAN: CommonJobConfigs.INTEGRATION_TEST.with_properties(
required_builds=[BuildNames.PACKAGE_TSAN],
- num_batches=6,
- timeout=9000, # the job timed out with default value (7200)
+ num_batches=8,
),
JobNames.INTEGRATION_TEST_AARCH64: CommonJobConfigs.INTEGRATION_TEST.with_properties(
required_builds=[BuildNames.PACKAGE_AARCH64],
- num_batches=6,
+ num_batches=8,
runner_type=Runners.FUNC_TESTER_AARCH64,
),
JobNames.INTEGRATION_TEST: CommonJobConfigs.INTEGRATION_TEST.with_properties(
required_builds=[BuildNames.PACKAGE_RELEASE],
- num_batches=4,
- release_only=True,
+ num_batches=8,
+ # release_only=True,
),
JobNames.INTEGRATION_TEST_FLAKY: CommonJobConfigs.INTEGRATION_TEST.with_properties(
required_builds=[BuildNames.PACKAGE_ASAN],
@@ -522,10 +517,6 @@ class CI:
required_builds=[BuildNames.PACKAGE_RELEASE, BuildNames.PACKAGE_AARCH64]
),
JobNames.DOCS_CHECK: JobConfig(
- digest=DigestConfig(
- include_paths=["**/*.md", "./docs", "tests/ci/docs_check.py"],
- docker=["clickhouse/docs-builder"],
- ),
run_command="docs_check.py",
runner_type=Runners.FUNC_TESTER,
),
@@ -539,7 +530,7 @@ class CI:
"./tests/clickhouse-test",
],
exclude_files=[".md"],
- docker=["clickhouse/fasttest"],
+ docker=["altinityinfra/fasttest"],
),
run_command="fast_test_check.py",
timeout=2400,
@@ -556,6 +547,14 @@ class CI:
timeout=2400,
runner_type=Runners.STYLE_CHECKER,
),
+ JobNames.SIGN_RELEASE: JobConfig(
+ required_builds=[BuildNames.PACKAGE_RELEASE],
+ runner_type=Runners.STYLE_CHECKER,
+ ),
+ JobNames.SIGN_AARCH64: JobConfig(
+ required_builds=[BuildNames.PACKAGE_AARCH64],
+ runner_type=Runners.STYLE_CHECKER_AARCH64,
+ ),
}
@classmethod
diff --git a/tests/ci/ci_definitions.py b/tests/ci/ci_definitions.py
index 6e7ef18b0a0e..7f7086c00186 100644
--- a/tests/ci/ci_definitions.py
+++ b/tests/ci/ci_definitions.py
@@ -59,12 +59,12 @@ class Runners(metaclass=WithIter):
GitHub runner's labels
"""
- BUILDER = "builder"
- BUILDER_AARCH64 = "builder-aarch64"
- STYLE_CHECKER = "style-checker"
- STYLE_CHECKER_AARCH64 = "style-checker-aarch64"
- FUNC_TESTER = "func-tester"
- FUNC_TESTER_AARCH64 = "func-tester-aarch64"
+ BUILDER = "altinity-builder"
+ BUILDER_AARCH64 = "altinity-builder-aarch64"
+ STYLE_CHECKER = "altinity-style-checker"
+ STYLE_CHECKER_AARCH64 = "altinity-style-checker-aarch64"
+ FUNC_TESTER = "altinity-func-tester"
+ FUNC_TESTER_AARCH64 = "altinity-func-tester-aarch64"
FUZZER_UNIT_TESTER = "fuzzer-unit-tester"
@@ -222,6 +222,9 @@ class JobNames(metaclass=WithIter):
DOCS_CHECK = "Docs check"
BUGFIX_VALIDATE = "Bugfix validation"
+ SIGN_RELEASE = "Sign release"
+ SIGN_AARCH64 = "Sign aarch64"
+
# hack to concatenate Build and non-build jobs under JobNames class
for attr_name in dir(BuildNames):
@@ -331,13 +334,13 @@ class JobConfig:
# will be triggered for the job if omitted in CI workflow yml
run_command: str = ""
# job timeout, seconds
- timeout: int = 7200
+ timeout: int = 7220
# sets number of batches for a multi-batch job
num_batches: int = 1
# label that enables job in CI, if set digest isn't used
run_by_labels: List[str] = field(default_factory=list)
# to run always regardless of the job digest or/and label
- run_always: bool = False
+ run_always: bool = True
# disables CI await for a given job
disable_await: bool = False
# if the job needs to be run on the release branch, including master (building packages, docker server).
@@ -390,7 +393,7 @@ class CommonJobConfigs:
job_name_keyword="compatibility",
digest=DigestConfig(
include_paths=["./tests/ci/compatibility_check.py"],
- docker=["clickhouse/test-old-ubuntu", "clickhouse/test-old-centos"],
+ docker=["altinityinfra/test-old-ubuntu", "altinityinfra/test-old-centos"],
),
run_command="compatibility_check.py",
runner_type=Runners.STYLE_CHECKER,
@@ -399,7 +402,7 @@ class CommonJobConfigs:
job_name_keyword="install",
digest=DigestConfig(
include_paths=["./tests/ci/install_check.py"],
- docker=["clickhouse/install-deb-test", "clickhouse/install-rpm-test"],
+ docker=["altinityinfra/install-deb-test", "altinityinfra/install-rpm-test"],
),
run_command='install_check.py "$CHECK_NAME"',
runner_type=Runners.STYLE_CHECKER,
@@ -417,7 +420,7 @@ class CommonJobConfigs:
"./tests/docker_scripts/",
],
exclude_files=[".md"],
- docker=["clickhouse/stateless-test"],
+ docker=["altinityinfra/stateless-test"],
),
run_command='functional_test_check.py "$CHECK_NAME"',
runner_type=Runners.FUNC_TESTER,
@@ -434,7 +437,7 @@ class CommonJobConfigs:
"./tests/docker_scripts/",
],
exclude_files=[".md"],
- docker=["clickhouse/stress-test"],
+ docker=["altinityinfra/stress-test"],
),
run_command="stress_check.py",
runner_type=Runners.FUNC_TESTER,
@@ -445,7 +448,7 @@ class CommonJobConfigs:
digest=DigestConfig(
include_paths=["./tests/ci/upgrade_check.py", "./tests/docker_scripts/"],
exclude_files=[".md"],
- docker=["clickhouse/stress-test"],
+ docker=["altinityinfra/stress-test"],
),
run_command="upgrade_check.py",
runner_type=Runners.FUNC_TESTER,
@@ -471,7 +474,7 @@ class CommonJobConfigs:
include_paths=[
"./tests/ci/ci_fuzzer_check.py",
],
- docker=["clickhouse/fuzzer"],
+ docker=["altinityinfra/fuzzer"],
),
run_command="ci_fuzzer_check.py",
run_always=True,
@@ -483,7 +486,7 @@ class CommonJobConfigs:
include_paths=[
"./tests/ci/ci_fuzzer_check.py",
],
- docker=["clickhouse/fuzzer"],
+ docker=["altinityinfra/fuzzer"],
),
run_command="ci_fuzzer_check.py",
run_always=True,
@@ -494,7 +497,7 @@ class CommonJobConfigs:
digest=DigestConfig(
include_paths=["./tests/ci/unit_tests_check.py"],
exclude_files=[".md"],
- docker=["clickhouse/unit-test"],
+ docker=["altinityinfra/unit-test"],
),
run_command="unit_tests_check.py",
runner_type=Runners.FUZZER_UNIT_TESTER,
@@ -507,7 +510,7 @@ class CommonJobConfigs:
"./tests/performance/",
],
exclude_files=[".md"],
- docker=["clickhouse/performance-comparison"],
+ docker=["altinityinfra/performance-comparison"],
),
run_command="performance_comparison_check.py",
runner_type=Runners.FUNC_TESTER,
@@ -525,7 +528,7 @@ class CommonJobConfigs:
digest=DigestConfig(
include_paths=["./tests/ci/sqllogic_test.py"],
exclude_files=[".md"],
- docker=["clickhouse/sqllogic-test"],
+ docker=["altinityinfra/sqllogic-test"],
),
run_command="sqllogic_test.py",
timeout=10800,
@@ -537,7 +540,7 @@ class CommonJobConfigs:
digest=DigestConfig(
include_paths=["./tests/ci/sqltest.py"],
exclude_files=[".md"],
- docker=["clickhouse/sqltest"],
+ docker=["altinityinfra/sqltest"],
),
run_command="sqltest.py",
timeout=10800,
@@ -554,7 +557,7 @@ class CommonJobConfigs:
DOCKER_SERVER = JobConfig(
job_name_keyword="docker",
required_on_release_branch=True,
- run_command='docker_server.py --check-name "$CHECK_NAME" --tag-type head --allow-build-reuse',
+ run_command='docker_server.py --check-name "$CHECK_NAME" --tag-type head --allow-build-reuse --push',
digest=DigestConfig(
include_paths=[
"tests/ci/docker_server.py",
@@ -571,7 +574,7 @@ class CommonJobConfigs:
include_paths=[
"tests/ci/clickbench.py",
],
- docker=["clickhouse/clickbench"],
+ docker=["altinityinfra/clickbench"],
),
run_command='clickbench.py "$CHECK_NAME"',
timeout=900,
@@ -600,7 +603,7 @@ class CommonJobConfigs:
"./tests/performance",
],
exclude_files=[".md"],
- docker=["clickhouse/binary-builder"],
+ docker=["altinityinfra/binary-builder"],
git_submodules=True,
),
run_command="build_check.py $BUILD_NAME",
@@ -611,7 +614,6 @@ class CommonJobConfigs:
REQUIRED_CHECKS = [
StatusNames.PR_CHECK,
JobNames.BUILD_CHECK,
- JobNames.DOCS_CHECK,
JobNames.FAST_TEST,
JobNames.STATELESS_TEST_RELEASE,
JobNames.STATELESS_TEST_ASAN,
diff --git a/tests/ci/ci_fuzzer_check.py b/tests/ci/ci_fuzzer_check.py
index 21bf77aa789d..7dbf24ce6e96 100644
--- a/tests/ci/ci_fuzzer_check.py
+++ b/tests/ci/ci_fuzzer_check.py
@@ -15,7 +15,7 @@
from stopwatch import Stopwatch
from tee_popen import TeePopen
-IMAGE_NAME = "clickhouse/fuzzer"
+IMAGE_NAME = "altinityinfra/fuzzer"
def get_run_command(
diff --git a/tests/ci/clickbench.py b/tests/ci/clickbench.py
index 349a848e9c7a..ffb8913c0893 100644
--- a/tests/ci/clickbench.py
+++ b/tests/ci/clickbench.py
@@ -20,7 +20,7 @@
def get_image_name() -> str:
- return "clickhouse/clickbench"
+ return "altinityinfra/clickbench"
def get_run_command(
diff --git a/tests/ci/clickhouse_helper.py b/tests/ci/clickhouse_helper.py
index 32e7f6f6a53d..44f75ad6806d 100644
--- a/tests/ci/clickhouse_helper.py
+++ b/tests/ci/clickhouse_helper.py
@@ -9,7 +9,7 @@
import requests
-from env_helper import GITHUB_REPOSITORY
+from env_helper import CLICKHOUSE_TEST_STAT_URL, CLICKHOUSE_TEST_STAT_PASSWORD, CLICKHOUSE_TEST_STAT_LOGIN
from get_robot_token import get_parameter_from_ssm
from pr_info import PRInfo
from report import TestResults
@@ -28,12 +28,12 @@ def __init__(
self, url: Optional[str] = None, auth: Optional[Dict[str, str]] = None
):
if url is None:
- url = get_parameter_from_ssm("clickhouse-test-stat-url")
+ url = CLICKHOUSE_TEST_STAT_URL
self.url = url
self.auth = auth or {
- "X-ClickHouse-User": get_parameter_from_ssm("clickhouse-test-stat-login"),
- "X-ClickHouse-Key": get_parameter_from_ssm("clickhouse-test-stat-password"),
+ "X-ClickHouse-User": CLICKHOUSE_TEST_STAT_LOGIN,
+ "X-ClickHouse-Key": CLICKHOUSE_TEST_STAT_PASSWORD,
}
@staticmethod
@@ -212,11 +212,11 @@ def prepare_tests_results_for_clickhouse(
report_url: str,
check_name: str,
) -> List[dict]:
- base_ref = pr_info.base_ref
- base_repo = pr_info.base_name
- head_ref = pr_info.head_ref
- head_repo = pr_info.head_name
- pull_request_url = f"https://github.com/{GITHUB_REPOSITORY}/commits/{head_ref}"
+ pull_request_url = "https://github.com/Altinity/ClickHouse/commits/master"
+ base_ref = "master"
+ head_ref = "master"
+ base_repo = pr_info.repo_full_name
+ head_repo = pr_info.repo_full_name
if pr_info.number != 0:
pull_request_url = pr_info.pr_html_url
diff --git a/tests/ci/commit_status_helper.py b/tests/ci/commit_status_helper.py
index 56e0608e9652..0e101d67f022 100644
--- a/tests/ci/commit_status_helper.py
+++ b/tests/ci/commit_status_helper.py
@@ -32,6 +32,7 @@
)
from s3_helper import S3Helper
from upload_result_helper import upload_results
+from get_robot_token import get_best_robot_token
RETRY = 5
CommitStatuses = List[CommitStatus]
@@ -155,9 +156,11 @@ def set_status_comment(commit: Commit, pr_info: PRInfo) -> None:
# CI Running status is deprecated for ClickHouse repo
return
- # to reduce number of parameters, the Github is constructed on the fly
- gh = Github()
- gh.__requester = commit._requester # type:ignore #pylint:disable=protected-access
+ gh = Github(**commit.requester.kwargs)
+ # Check that requests work at all
+ logging.info('Rate limit response for current GH token: %s',
+ gh.requester.graphql_query('rateLimit { limit remaining resetAt used }', {}))
+
repo = get_repo(gh)
statuses = sorted(get_commit_filtered_statuses(commit), key=lambda x: x.context)
statuses = [
diff --git a/tests/ci/compatibility_check.py b/tests/ci/compatibility_check.py
index ca1a8619ccbc..174086917ec9 100644
--- a/tests/ci/compatibility_check.py
+++ b/tests/ci/compatibility_check.py
@@ -17,8 +17,8 @@
from report import FAILURE, SUCCESS, JobReport, TestResult, TestResults
from stopwatch import Stopwatch
-IMAGE_UBUNTU = "clickhouse/test-old-ubuntu"
-IMAGE_CENTOS = "clickhouse/test-old-centos"
+IMAGE_UBUNTU = "altinityinfra/test-old-ubuntu"
+IMAGE_CENTOS = "altinityinfra/test-old-centos"
DOWNLOAD_RETRIES_COUNT = 5
diff --git a/tests/ci/create_release.py b/tests/ci/create_release.py
index 6918d57f9486..aee34f2b3f98 100755
--- a/tests/ci/create_release.py
+++ b/tests/ci/create_release.py
@@ -526,7 +526,7 @@ def update_release_info(self, dry_run: bool) -> "ReleaseInfo":
url = "dry-run"
print(f"ChangeLog PR url [{url}]")
self.changelog_pr = url
- self.docker = f"docker run --rm clickhouse/clickhouse:{self.version} clickhouse --version"
+ self.docker = f"docker run --rm altinityinfra/clickhouse:{self.version} clickhouse --version"
else:
# new release branch - find version bump pr on a master branch
branch = self.get_version_bump_branch()
diff --git a/tests/ci/docker_images_check.py b/tests/ci/docker_images_check.py
index 63a01770eef2..03b6fe659c9c 100644
--- a/tests/ci/docker_images_check.py
+++ b/tests/ci/docker_images_check.py
@@ -103,10 +103,11 @@ def build_and_push_one_image(
from_tag: Optional[str] = None,
) -> Tuple[bool, Path]:
logging.info(
- "Building docker image %s with version %s from path %s",
+ "Building docker image %s with version %s from path %s, from_tag: %s",
image.repo,
version_string,
image.path,
+ from_tag,
)
build_log = (
Path(TEMP_PATH)
diff --git a/tests/ci/docker_images_helper.py b/tests/ci/docker_images_helper.py
index 6fb81c266f88..f58df34439eb 100644
--- a/tests/ci/docker_images_helper.py
+++ b/tests/ci/docker_images_helper.py
@@ -6,9 +6,9 @@
from pathlib import Path
from typing import Any, Dict, List, Optional
+from env_helper import ROOT_DIR, DOCKER_TAG, DOCKER_PASSWORD
from ci_utils import Shell
-from env_helper import DOCKER_TAG, ROOT_DIR
-from get_robot_token import get_parameter_from_ssm
+
IMAGES_FILE_PATH = Path("docker/images.json")
@@ -20,9 +20,9 @@ def docker_login(relogin: bool = True) -> None:
"docker system info | grep --quiet -E 'Username|Registry'"
):
Shell.check( # pylint: disable=unexpected-keyword-arg
- "docker login --username 'robotclickhouse' --password-stdin",
+ "docker login --username 'altinityinfra' --password-stdin",
strict=True,
- stdin_str=get_parameter_from_ssm("dockerhub_robot_password"),
+ stdin_str=DOCKER_PASSWORD,
encoding="utf-8",
)
diff --git a/tests/ci/docker_manifests_merge.py b/tests/ci/docker_manifests_merge.py
index 772cbbf1b024..914e93935deb 100644
--- a/tests/ci/docker_manifests_merge.py
+++ b/tests/ci/docker_manifests_merge.py
@@ -212,7 +212,7 @@ def main():
NAME,
)
ch_helper = ClickHouseHelper()
- ch_helper.insert_events_into(db="default", table="checks", events=prepared_events)
+ ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events)
if status == FAILURE:
sys.exit(1)
diff --git a/tests/ci/docker_server.py b/tests/ci/docker_server.py
index 95160e82513f..c0bb7239affb 100644
--- a/tests/ci/docker_server.py
+++ b/tests/ci/docker_server.py
@@ -56,7 +56,7 @@ def parse_args() -> argparse.Namespace:
"--version",
type=version_arg,
default=get_version_from_repo(git=git).string,
- help="a version to build, automaticaly got from version_helper, accepts either "
+ help="a version to build, automatically got from version_helper, accepts either "
"tag ('refs/tags/' is removed automatically) or a normal 22.2.2.2 format",
)
parser.add_argument(
@@ -211,7 +211,7 @@ def build_and_push_image(
init_args = ["docker", "buildx", "build"]
if push:
init_args.append("--push")
- init_args.append("--output=type=image,push-by-digest=true")
+ init_args.append("--output=type=image")
init_args.append(f"--tag={image.repo}")
else:
init_args.append("--output=type=docker")
@@ -302,7 +302,7 @@ def test_docker_library(test_results: TestResults) -> None:
tr.name
for tr in test_results
if (
- tr.name.startswith("clickhouse/clickhouse-server")
+ tr.name.startswith("altinityinfra/clickhouse-server")
and "alpine" not in tr.name
)
]
@@ -362,10 +362,10 @@ def main():
assert not args.image_path and not args.image_repo
if "server image" in args.check_name:
image_path = "docker/server"
- image_repo = "clickhouse/clickhouse-server"
+ image_repo = "altinityinfra/clickhouse-server"
elif "keeper image" in args.check_name:
image_path = "docker/keeper"
- image_repo = "clickhouse/clickhouse-keeper"
+ image_repo = "altinityinfra/clickhouse-keeper"
else:
assert False, "Invalid --check-name"
else:
@@ -382,7 +382,8 @@ def main():
push = True
image = DockerImageData(image_path, image_repo, False)
- tags = gen_tags(args.version, args.tag_type)
+ tags = [f'{pr_info.number}-{args.version}']
+
repo_urls = {}
direct_urls: Dict[str, List[str]] = {}
diff --git a/tests/ci/docs_check.py b/tests/ci/docs_check.py
index cb2aa3054406..1e48a3bf5aee 100644
--- a/tests/ci/docs_check.py
+++ b/tests/ci/docs_check.py
@@ -69,9 +69,7 @@ def main():
elif args.force:
logging.info("Check the docs because of force flag")
- docker_image = get_docker_image("clickhouse/docs-builder")
- if args.pull_image:
- docker_image = pull_image(docker_image)
+ docker_image = pull_image(get_docker_image("altinityinfra/docs-builder"))
test_output = temp_path / "docs_check"
test_output.mkdir(parents=True, exist_ok=True)
diff --git a/tests/ci/env_helper.py b/tests/ci/env_helper.py
index 2492c67913b4..ac22e90a9288 100644
--- a/tests/ci/env_helper.py
+++ b/tests/ci/env_helper.py
@@ -18,7 +18,7 @@
CLOUDFLARE_TOKEN = os.getenv("CLOUDFLARE_TOKEN")
GITHUB_EVENT_PATH = os.getenv("GITHUB_EVENT_PATH", "")
GITHUB_JOB = os.getenv("GITHUB_JOB_OVERRIDDEN", "") or os.getenv("GITHUB_JOB", "local")
-GITHUB_REPOSITORY = os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse")
+GITHUB_REPOSITORY = os.getenv("GITHUB_REPOSITORY", "Altinity/ClickHouse")
GITHUB_RUN_ID = os.getenv("GITHUB_RUN_ID", "0")
GITHUB_SERVER_URL = os.getenv("GITHUB_SERVER_URL", "https://github.com")
GITHUB_UPSTREAM_REPOSITORY = os.getenv(
@@ -29,9 +29,13 @@
IMAGES_PATH = os.getenv("IMAGES_PATH", TEMP_PATH)
REPO_COPY = os.getenv("REPO_COPY", GITHUB_WORKSPACE)
RUNNER_TEMP = os.getenv("RUNNER_TEMP", p.abspath(p.join(module_dir, "./tmp")))
-S3_BUILDS_BUCKET = os.getenv("S3_BUILDS_BUCKET", "clickhouse-builds")
-S3_BUILDS_BUCKET_PUBLIC = "clickhouse-builds"
-S3_TEST_REPORTS_BUCKET = os.getenv("S3_TEST_REPORTS_BUCKET", "clickhouse-test-reports")
+
+S3_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID")
+S3_BUILDS_BUCKET = os.getenv("S3_BUILDS_BUCKET", "altinity-build-artifacts")
+S3_BUILDS_BUCKET_PUBLIC = "altinity-build-artifacts"
+S3_SECRET_ACCESS_KEY = os.getenv("AWS_SECRET_ACCESS_KEY")
+S3_TEST_REPORTS_BUCKET = os.getenv("S3_TEST_REPORTS_BUCKET", "altinity-build-artifacts")
+
S3_URL = os.getenv("S3_URL", "https://s3.amazonaws.com")
S3_DOWNLOAD = os.getenv("S3_DOWNLOAD", S3_URL)
S3_ARTIFACT_DOWNLOAD_TEMPLATE = (
@@ -39,3 +43,8 @@
"{pr_or_release}/{commit}/{build_name}/{artifact}"
)
CI_CONFIG_PATH = f"{TEMP_PATH}/ci_config.json"
+CLICKHOUSE_TEST_STAT_LOGIN = os.getenv("CLICKHOUSE_TEST_STAT_LOGIN")
+CLICKHOUSE_TEST_STAT_PASSWORD = os.getenv("CLICKHOUSE_TEST_STAT_PASSWORD")
+CLICKHOUSE_TEST_STAT_URL = os.getenv("CLICKHOUSE_TEST_STAT_URL")
+DOCKER_PASSWORD = os.getenv("DOCKER_PASSWORD")
+ROBOT_TOKEN = os.getenv("ROBOT_TOKEN")
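+# NOTE: the values above are expected to be provided by the CI workflow environment
+# (e.g. from repository secrets); when the scripts are run locally they may simply be None.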
diff --git a/tests/ci/fast_test_check.py b/tests/ci/fast_test_check.py
index fe272b9747d9..29bab4a8b58d 100644
--- a/tests/ci/fast_test_check.py
+++ b/tests/ci/fast_test_check.py
@@ -82,7 +82,7 @@ def main():
pr_info = PRInfo()
- docker_image = pull_image(get_docker_image("clickhouse/fasttest"))
+ docker_image = pull_image(get_docker_image("altinityinfra/fasttest"))
workspace = temp_path / "fasttest-workspace"
workspace.mkdir(parents=True, exist_ok=True)
diff --git a/tests/ci/functional_test_check.py b/tests/ci/functional_test_check.py
index f9df2fde4fa0..35fbf6c4a822 100644
--- a/tests/ci/functional_test_check.py
+++ b/tests/ci/functional_test_check.py
@@ -18,7 +18,6 @@
from docker_images_helper import DockerImage, get_docker_image
from download_release_packages import download_last_release
from env_helper import REPO_COPY, REPORT_PATH, TEMP_PATH
-from get_robot_token import get_parameter_from_ssm
from pr_info import PRInfo
from report import (
ERROR,
@@ -39,9 +38,17 @@
class SensitiveFormatter(logging.Formatter):
@staticmethod
def _filter(s):
- return re.sub(
- r"(.*)(AZURE_CONNECTION_STRING.*\')(.*)", r"\1AZURE_CONNECTION_STRING\3", s
+ s = re.sub(r"(.*)(AZURE_STORAGE_KEY\S*\')(.*)", r"\1AZURE_STORAGE_KEY\3", s)
+ s = re.sub(r"(.*)(AZURE_ACCOUNT_NAME\S*\')(.*)", r"\1AZURE_ACCOUNT_NAME\3", s)
+ s = re.sub(
+ r"(.*)(AZURE_CONTAINER_NAME\S*\')(.*)", r"\1AZURE_CONTAINER_NAME\3", s
)
+ s = re.sub(
+ r"(.*)(AZURE_STORAGE_ACCOUNT_URL\S*\')(.*)",
+ r"\1AZURE_STORAGE_ACCOUNT_URL\3",
+ s,
+ )
+ return s
def format(self, record):
original = logging.Formatter.format(self, record)
@@ -52,8 +59,16 @@ def get_additional_envs(
check_name: str, run_by_hash_num: int, run_by_hash_total: int
) -> List[str]:
result = []
- azure_connection_string = get_parameter_from_ssm("azure_connection_string")
- result.append(f"AZURE_CONNECTION_STRING='{azure_connection_string}'")
+ # Get Azure credentials from environment variables
+ azure_account_name = os.environ.get("AZURE_ACCOUNT_NAME")
+
+ if azure_account_name:
+ result.append(f"AZURE_ACCOUNT_NAME='{azure_account_name}'")
+ result.append(f"AZURE_STORAGE_KEY='{os.environ['AZURE_STORAGE_KEY']}'")
+ result.append(f"AZURE_CONTAINER_NAME='{os.environ['AZURE_CONTAINER_NAME']}'")
+ result.append(
+ f"AZURE_STORAGE_ACCOUNT_URL='{os.environ['AZURE_STORAGE_ACCOUNT_URL']}'"
+ )
if "DatabaseReplicated" in check_name:
result.append("USE_DATABASE_REPLICATED=1")
if "DatabaseOrdinary" in check_name:
@@ -67,9 +82,6 @@ def get_additional_envs(
result.append("RANDOMIZE_OBJECT_KEY_TYPE=1")
if "analyzer" in check_name:
result.append("USE_OLD_ANALYZER=1")
- if "azure" in check_name:
- assert "USE_S3_STORAGE_FOR_MERGE_TREE=1" not in result
- result.append("USE_AZURE_STORAGE_FOR_MERGE_TREE=1")
if run_by_hash_total != 0:
result.append(f"RUN_BY_HASH_NUM={run_by_hash_num}")
@@ -80,9 +92,9 @@ def get_additional_envs(
def get_image_name(check_name: str) -> str:
if "stateless" in check_name.lower() or "validation" in check_name.lower():
- return "clickhouse/stateless-test"
+ return "altinityinfra/stateless-test"
if "stateful" in check_name.lower():
- return "clickhouse/stateful-test"
+ return "altinityinfra/stateful-test"
raise ValueError(f"Cannot deduce image name based on check name {check_name}")
diff --git a/tests/ci/get_robot_token.py b/tests/ci/get_robot_token.py
index 662057f72d52..f9b30beb178f 100644
--- a/tests/ci/get_robot_token.py
+++ b/tests/ci/get_robot_token.py
@@ -10,6 +10,7 @@
from github.GithubException import BadCredentialsException
from github.NamedUser import NamedUser
+from env_helper import ROBOT_TOKEN
@dataclass
class Token:
@@ -56,12 +57,15 @@ def get_parameters_from_ssm(
return results
+# NOTE(Arthur Passos): Original CI code uses the "_original" version of this method. Each robot token is rate limited
+# and the original implementation selects the "best one". To make it simpler and iterate faster,
+# we are using only one robot and keeping the method signature. In the future we might reconsider
+# having multiple robot tokens
+def get_best_robot_token():
+ return ROBOT_TOKEN
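+# NOTE: with this simplification the token is taken from the ROBOT_TOKEN environment
+# variable (see env_helper.py) rather than being selected from the SSM-stored candidates
+# used by get_best_robot_token_original() below.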
-ROBOT_TOKEN = None # type: Optional[Token]
-
-
-def get_best_robot_token(tokens_path: str = "/github-tokens") -> str:
- global ROBOT_TOKEN # pylint:disable=global-statement
+def get_best_robot_token_original(tokens_path: str = "/github-tokens") -> str:
+ global ROBOT_TOKEN
if ROBOT_TOKEN is not None:
return ROBOT_TOKEN.value
client = boto3.client("ssm", region_name="us-east-1")
diff --git a/tests/ci/git_helper.py b/tests/ci/git_helper.py
index 511f985bc2ab..e24f83de3eda 100644
--- a/tests/ci/git_helper.py
+++ b/tests/ci/git_helper.py
@@ -18,13 +18,30 @@
logger = logging.getLogger(__name__)
+class VersionType:
+ LTS = "lts"
+ NEW = "new"
+ PRESTABLE = "altinityedge"
+ STABLE = "altinitystable"
+ TESTING = "altinitytest"
+ ANTALYA = "altinityantalya"
+
+ VALID = (NEW, TESTING, PRESTABLE, STABLE, LTS, ANTALYA,
+             # NOTE (vnemkov): we don't use those directly, but they are used in unit tests
+ "stable",
+ "prestable",
+ "testing",
+ )
+
# ^ and $ match subline in `multiple\nlines`
# \A and \Z match only start and end of the whole string
+# NOTE (vnemkov): support both the upstream tag style (v22.x.y.z-lts) and the Altinity tag style (v22.x.y.z.altinitystable),
+# because at early release stages there may be no Altinity tag set on the commit yet, only the upstream one.
RELEASE_BRANCH_REGEXP = r"\A\d+[.]\d+\Z"
TAG_REGEXP = (
r"\Av\d{2}" # First two digits of major part
r"([.][1-9]\d*){3}" # minor.patch.tweak parts
- r"-(new|testing|prestable|stable|lts)\Z" # suffix with a version type
+ fr"[\.-]({'|'.join(VersionType.VALID)})\Z" # suffix with a version type
)
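+# Illustrative examples only: TAG_REGEXP accepts both upstream-style tags such as
+# "v24.3.12.76-lts" and Altinity-style tags such as "v24.3.12.76.altinitystable".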
SHA_REGEXP = re.compile(r"\A([0-9]|[a-f]){40}\Z")
@@ -33,9 +50,9 @@
with tempfile.NamedTemporaryFile("w", delete=False) as f:
GIT_KNOWN_HOSTS_FILE = f.name
- GIT_PREFIX = ( # All commits to remote are done as robot-clickhouse
- "git -c user.email=robot-clickhouse@users.noreply.github.com "
- "-c user.name=robot-clickhouse -c commit.gpgsign=false "
+ GIT_PREFIX = ( # All commits to remote are done as altinity-robot
+ "git -c user.email=altinity-robot@users.noreply.github.com "
+ "-c user.name=altinity-robot -c commit.gpgsign=false "
"-c core.sshCommand="
f"'ssh -o UserKnownHostsFile={GIT_KNOWN_HOSTS_FILE} "
"-o StrictHostKeyChecking=accept-new'"
@@ -234,6 +251,7 @@ def __init__(self, ignore_no_tags: bool = False):
self.sha_short = ""
self.commits_since_latest = 0
self.commits_since_new = 0
+ self.commits_since_upstream = 0 # commits since upstream tag
self.update()
def update(self):
@@ -252,13 +270,19 @@ def update(self):
return
self._update_tags()
+ def _commits_since(self, ref_name):
+ return int(
+ self.run(f"git rev-list {ref_name}..HEAD --count")
+ )
+
def _update_tags(self, suppress_stderr: bool = False) -> None:
stderr = subprocess.DEVNULL if suppress_stderr else None
self.latest_tag = self.run("git describe --tags --abbrev=0", stderr=stderr)
- # Format should be: {latest_tag}-{commits_since_tag}-g{sha_short}
- self.commits_since_latest = int(
- self.run(f"git rev-list {self.latest_tag}..HEAD --count")
- )
+ self.commits_since_latest = self._commits_since(self.latest_tag)
+
+ self.latest_upstream_tag = self.run("git describe --tags --abbrev=0 --match='*-*'", stderr=stderr)
+ self.commits_since_upstream = None if not self.latest_upstream_tag else self._commits_since(self.latest_upstream_tag)
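+        # "--match='*-*'" only matches upstream-style tags (names containing a dash, e.g. "v25.3.6.56-lts"),
+        # so commits_since_upstream counts the commits on top of the latest upstream tag.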
+
if self.latest_tag.endswith("-new"):
# We won't change the behaviour of the the "latest_tag"
# So here we set "new_tag" to the previous tag in the graph, that will allow
@@ -267,9 +291,7 @@ def _update_tags(self, suppress_stderr: bool = False) -> None:
f"git describe --tags --abbrev=0 --exclude='{self.latest_tag}'",
stderr=stderr,
)
- self.commits_since_new = int(
- self.run(f"git rev-list {self.new_tag}..HEAD --count")
- )
+ self.commits_since_new = self._commits_since(self.new_tag)
@staticmethod
def check_tag(value: str) -> None:
diff --git a/tests/ci/install_check.py b/tests/ci/install_check.py
index 497dc820e25a..34572d14a3a4 100644
--- a/tests/ci/install_check.py
+++ b/tests/ci/install_check.py
@@ -16,8 +16,8 @@
from stopwatch import Stopwatch
from tee_popen import TeePopen
-RPM_IMAGE = "clickhouse/install-rpm-test"
-DEB_IMAGE = "clickhouse/install-deb-test"
+RPM_IMAGE = "altinityinfra/install-rpm-test"
+DEB_IMAGE = "altinityinfra/install-deb-test"
TEMP_PATH = Path(f"{REPO_COPY}/ci/tmp/")
LOGS_PATH = TEMP_PATH / "tests_logs"
@@ -30,7 +30,10 @@ def prepare_test_scripts():
echo "$test_env" >> /etc/default/clickhouse
systemctl restart clickhouse-server
clickhouse-client -q 'SELECT version()'
-grep "$test_env" /proc/$(cat /var/run/clickhouse-server/clickhouse-server.pid)/environ"""
+grep "$test_env" /proc/$(cat /var/run/clickhouse-server/clickhouse-server.pid)/environ
+echo "Check Stacktrace"
+output=$(clickhouse-local --stacktrace --query="SELECT throwIf(1,'throw')" 2>&1 >/dev/null || true)
+echo "$output" | grep 'FunctionThrowIf::executeImpl'"""
initd_test = r"""#!/bin/bash
set -e
trap "bash -ex /packages/preserve_logs.sh" ERR
diff --git a/tests/ci/integration_test_images.py b/tests/ci/integration_test_images.py
index 89c4797c0e87..478ef2598f4b 100644
--- a/tests/ci/integration_test_images.py
+++ b/tests/ci/integration_test_images.py
@@ -1,19 +1,18 @@
#!/usr/bin/env python3
IMAGES_ENV = {
- "clickhouse/dotnet-client": "DOCKER_DOTNET_CLIENT_TAG",
- "clickhouse/integration-helper": "DOCKER_HELPER_TAG",
- "clickhouse/integration-test": "DOCKER_BASE_TAG",
- "clickhouse/integration-tests-runner": "",
- "clickhouse/kerberos-kdc": "DOCKER_KERBEROS_KDC_TAG",
- "clickhouse/mysql-golang-client": "DOCKER_MYSQL_GOLANG_CLIENT_TAG",
- "clickhouse/mysql-java-client": "DOCKER_MYSQL_JAVA_CLIENT_TAG",
- "clickhouse/mysql-js-client": "DOCKER_MYSQL_JS_CLIENT_TAG",
- "clickhouse/mysql-php-client": "DOCKER_MYSQL_PHP_CLIENT_TAG",
- "clickhouse/nginx-dav": "DOCKER_NGINX_DAV_TAG",
- "clickhouse/postgresql-java-client": "DOCKER_POSTGRESQL_JAVA_CLIENT_TAG",
- "clickhouse/python-bottle": "DOCKER_PYTHON_BOTTLE_TAG",
- "clickhouse/integration-test-with-unity-catalog": "DOCKER_BASE_WITH_UNITY_CATALOG_TAG",
+ "altinityinfra/dotnet-client": "DOCKER_DOTNET_CLIENT_TAG",
+ "altinityinfra/integration-helper": "DOCKER_HELPER_TAG",
+ "altinityinfra/integration-test": "DOCKER_BASE_TAG",
+ "altinityinfra/integration-tests-runner": "",
+ "altinityinfra/kerberos-kdc": "DOCKER_KERBEROS_KDC_TAG",
+ "altinityinfra/mysql-golang-client": "DOCKER_MYSQL_GOLANG_CLIENT_TAG",
+ "altinityinfra/mysql-java-client": "DOCKER_MYSQL_JAVA_CLIENT_TAG",
+ "altinityinfra/mysql-js-client": "DOCKER_MYSQL_JS_CLIENT_TAG",
+ "altinityinfra/mysql-php-client": "DOCKER_MYSQL_PHP_CLIENT_TAG",
+ "altinityinfra/nginx-dav": "DOCKER_NGINX_DAV_TAG",
+ "altinityinfra/postgresql-java-client": "DOCKER_POSTGRESQL_JAVA_CLIENT_TAG",
+ "altinityinfra/python-bottle": "DOCKER_PYTHON_BOTTLE_TAG",
}
IMAGES = list(IMAGES_ENV.keys())
diff --git a/tests/ci/integration_tests_runner.py b/tests/ci/integration_tests_runner.py
index 86c26fd172e0..3eadac78230b 100755
--- a/tests/ci/integration_tests_runner.py
+++ b/tests/ci/integration_tests_runner.py
@@ -15,7 +15,7 @@
import time
from collections import OrderedDict, defaultdict
from itertools import chain
-from typing import Any, Dict, Final, List, Optional, Set, Tuple
+from typing import Any, Dict, Final, List, Optional, Union, Set, Tuple
import yaml # type: ignore[import-untyped]
@@ -26,14 +26,14 @@
from stopwatch import Stopwatch
from tee_popen import TeePopen
-MAX_RETRY = 1
-NUM_WORKERS = 5
+MAX_RETRY = 2
+NUM_WORKERS = 10
SLEEP_BETWEEN_RETRIES = 5
PARALLEL_GROUP_SIZE = 100
CLICKHOUSE_BINARY_PATH = "usr/bin/clickhouse"
-FLAKY_TRIES_COUNT = 2 # run whole pytest several times
-FLAKY_REPEAT_COUNT = 3 # runs test case in single module several times
+FLAKY_TRIES_COUNT = 3 # run whole pytest several times
+FLAKY_REPEAT_COUNT = 5 # runs test case in single module several times
MAX_TIME_SECONDS = 3600
MAX_TIME_IN_SANDBOX = 20 * 60 # 20 minutes
@@ -226,7 +226,7 @@ def _pre_pull_images(self):
cmd = (
f"cd {self.repo_path}/tests/integration && "
- f"timeout --verbose --signal=KILL 1h ./runner {self._get_runner_opts()} {image_cmd} "
+ f"timeout --verbose --signal=KILL 2h ./runner {self._get_runner_opts()} {image_cmd} "
"--pre-pull --command ' echo Pre Pull finished ' "
)
@@ -358,7 +358,7 @@ def all_tests(self) -> List[str]:
report_file = "runner_get_all_tests.jsonl"
cmd = (
f"cd {self.repo_path}/tests/integration && "
- f"timeout --signal=KILL 1h ./runner {runner_opts} {image_cmd} -- "
+ f"timeout --signal=KILL 2h ./runner {runner_opts} {image_cmd} -- "
f"--setup-plan --report-log={report_file}"
)
@@ -414,6 +414,19 @@ def _get_parallel_tests_skip_list(repo_path):
skip_list_tests = yaml.safe_load(skip_list_file)
return list(sorted(skip_list_tests))
+ @staticmethod
+ def _get_broken_tests_list(repo_path: str) -> dict:
+ skip_list_file_path = f"{repo_path}/tests/broken_tests.json"
+ if (
+ not os.path.isfile(skip_list_file_path)
+ or os.path.getsize(skip_list_file_path) == 0
+ ):
+ return {}
+
+ with open(skip_list_file_path, "r", encoding="utf-8") as skip_list_file:
+ skip_list_tests = json.load(skip_list_file)
+ return skip_list_tests
+
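+    # Illustrative (hypothetical) shape of tests/broken_tests.json: keys are test names and the
+    # optional "message" field narrows the match to a specific failure text, e.g.
+    #   {"test_example/test.py::test_case": {"message": "expected error text"}}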
@staticmethod
def group_test_by_file(tests):
result = OrderedDict() # type: OrderedDict
@@ -448,6 +461,69 @@ def _update_counters(
for test in current_counters[state]:
main_counters[state].append(test)
+ def _handle_broken_tests(
+ self,
+ counters: Dict[str, List[str]],
+ known_broken_tests: Dict[str, Dict[str, str]],
+ log_paths: Union[Dict[str, List[str]], List[str]],
+ ) -> None:
+
+ def get_log_paths(test_name):
+ """Could be a list of logs for all tests or a dict with test name as a key"""
+ if isinstance(log_paths, dict):
+ return log_paths[test_name]
+ return log_paths
+
+ broken_tests_log = os.path.join(self.result_path, "broken_tests_handler.log")
+
+ with open(broken_tests_log, "a") as log_file:
+ log_file.write(f"{len(known_broken_tests)} Known broken tests\n")
+ for status, tests in counters.items():
+ log_file.write(f"Total tests in {status} state: {len(tests)}\n")
+
+ for fail_status in ("ERROR", "FAILED"):
+ for failed_test in counters[fail_status].copy():
+ log_file.write(
+ f"Checking test {failed_test} (status: {fail_status})\n"
+ )
+ if failed_test not in known_broken_tests.keys():
+ log_file.write(
+ f"Test {failed_test} is not in known broken tests\n"
+ )
+ else:
+ fail_message = known_broken_tests[failed_test].get("message")
+
+ if not fail_message:
+ log_file.write(
+ "No fail message specified, marking as broken\n"
+ )
+ mark_as_broken = True
+ else:
+ log_file.write(
+ f"Looking for fail message: {fail_message}\n"
+ )
+ mark_as_broken = False
+ for log_path in get_log_paths(failed_test):
+ if log_path.endswith(".log"):
+ log_file.write(f"Checking log file: {log_path}\n")
+ with open(log_path) as test_log:
+ if fail_message in test_log.read():
+ log_file.write(
+ "Found fail message in logs\n"
+ )
+ mark_as_broken = True
+ break
+
+ if mark_as_broken:
+ log_file.write(f"Moving test to BROKEN state\n")
+ counters[fail_status].remove(failed_test)
+ counters["BROKEN"].append(failed_test)
+ else:
+ log_file.write("Test not marked as broken\n")
+
+ for status, tests in counters.items():
+ log_file.write(f"Total tests in {status} state: {len(tests)}\n")
+
def _get_runner_image_cmd(self):
image_cmd = ""
if self._can_run_with(
@@ -455,7 +531,7 @@ def _get_runner_image_cmd(self):
"--docker-image-version",
):
for img in IMAGES:
- if img == "clickhouse/integration-tests-runner":
+ if img == "altinityinfra/integration-tests-runner":
runner_version = self.get_image_version(img)
logging.info(
"Can run with custom docker image version %s", runner_version
@@ -727,6 +803,7 @@ def run_flaky_check(self, build_path, should_fail=False):
} # type: Dict
tests_times = defaultdict(float) # type: Dict
tests_log_paths = defaultdict(list)
+ known_broken_tests = self._get_broken_tests_list(self.repo_path)
id_counter = 0
for test_to_run in tests_to_run:
tries_num = 1 if should_fail else FLAKY_TRIES_COUNT
@@ -744,6 +821,10 @@ def run_flaky_check(self, build_path, should_fail=False):
1,
FLAKY_REPEAT_COUNT,
)
+
+ # Handle broken tests on the group counters that contain test results for a single group
+ self._handle_broken_tests(group_counters, known_broken_tests, log_paths)
+
id_counter = id_counter + 1
for counter, value in group_counters.items():
logging.info(
@@ -922,6 +1003,7 @@ def run_normal_check(self, build_path):
tests_times = defaultdict(float)
tests_log_paths = defaultdict(list)
items_to_run = list(grouped_tests.items())
+ known_broken_tests = self._get_broken_tests_list(self.repo_path)
logging.info("Total test groups %s", len(items_to_run))
if self.shuffle_test_groups():
logging.info("Shuffling test groups")
@@ -934,6 +1016,10 @@ def run_normal_check(self, build_path):
group_counters, group_test_times, log_paths = self.try_run_test_group(
"1h", group, tests, MAX_RETRY, NUM_WORKERS, 0
)
+
+ # Handle broken tests on the group counters that contain test results for a single group
+ self._handle_broken_tests(group_counters, known_broken_tests, log_paths)
+
total_tests = 0
for counter, value in group_counters.items():
logging.info(
diff --git a/tests/ci/jepsen_check.py b/tests/ci/jepsen_check.py
index 274ed31d9840..b5d4c5309a5b 100644
--- a/tests/ci/jepsen_check.py
+++ b/tests/ci/jepsen_check.py
@@ -26,10 +26,10 @@
KEEPER_DESIRED_INSTANCE_COUNT = 3
SERVER_DESIRED_INSTANCE_COUNT = 4
-KEEPER_IMAGE_NAME = "clickhouse/keeper-jepsen-test"
+KEEPER_IMAGE_NAME = "altinityinfra/keeper-jepsen-test"
KEEPER_CHECK_NAME = CI.JobNames.JEPSEN_KEEPER
-SERVER_IMAGE_NAME = "clickhouse/server-jepsen-test"
+SERVER_IMAGE_NAME = "altinityinfra/server-jepsen-test"
SERVER_CHECK_NAME = CI.JobNames.JEPSEN_SERVER
SUCCESSFUL_TESTS_ANCHOR = "# Successful tests"
diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py
index 86573b3e37cc..4a95ce74d28b 100644
--- a/tests/ci/libfuzzer_test_check.py
+++ b/tests/ci/libfuzzer_test_check.py
@@ -238,7 +238,7 @@ def main():
run_by_hash_num = 0
run_by_hash_total = 0
- docker_image = pull_image(get_docker_image("clickhouse/libfuzzer"))
+ docker_image = pull_image(get_docker_image("altinityinfra/libfuzzer"))
fuzzers_path = temp_path / "fuzzers"
fuzzers_path.mkdir(parents=True, exist_ok=True)
diff --git a/tests/ci/performance_comparison_check.py b/tests/ci/performance_comparison_check.py
index 7c344e88a60e..204d63892b04 100644
--- a/tests/ci/performance_comparison_check.py
+++ b/tests/ci/performance_comparison_check.py
@@ -30,7 +30,7 @@
from stopwatch import Stopwatch
from tee_popen import TeePopen
-IMAGE_NAME = "clickhouse/performance-comparison"
+IMAGE_NAME = "altinityinfra/performance-comparison"
def get_run_command(
@@ -99,7 +99,7 @@ def main():
if pr_info.number == 0:
pr_link = commit.html_url
else:
- pr_link = f"https://github.com/ClickHouse/ClickHouse/pull/{pr_info.number}"
+ pr_link = f"https://github.com/Altinity/ClickHouse/pull/{pr_info.number}"
docker_env += (
f' -e CHPC_ADD_REPORT_LINKS="'
@@ -206,7 +206,7 @@ def main():
def too_many_slow(msg):
match = re.search(r"(|.* )(\d+) slower.*", msg)
# This threshold should be synchronized with the value in
- # https://github.com/ClickHouse/ClickHouse/blob/master/docker/test/performance-comparison/report.py#L629
+ # https://github.com/Altinity/ClickHouse/blob/master/docker/test/performance-comparison/report.py#L629
threshold = 5
return int(match.group(2).strip()) > threshold if match else False
diff --git a/tests/ci/pr_info.py b/tests/ci/pr_info.py
index e7956a1a016e..2b5b32f511c4 100644
--- a/tests/ci/pr_info.py
+++ b/tests/ci/pr_info.py
@@ -83,12 +83,16 @@ def get_pr_for_commit(sha, ref):
return pr
our_prs.append(pr)
logging.warning(
- "Cannot find PR with required ref %s, sha %s - returning first one",
+ "Cannot find PR with required ref %s, sha %s",
ref,
sha,
)
- first_pr = our_prs[0]
- return first_pr
+ # NOTE(vnemkov): IMO returning possibly unrelated PR is bad and breaks CI/CD down the road
+ # if len(our_prs) != 0:
+ # first_pr = our_prs[0]
+ # return first_pr
+ # else:
+ return None
except Exception as ex:
logging.error(
"Cannot fetch PR info from commit ref %s, sha %s, exception: %s",
@@ -139,8 +143,9 @@ def __init__(
ref = github_event.get("ref", "refs/heads/master")
if ref and ref.startswith("refs/heads/"):
ref = ref[11:]
+ self.ref = ref # type: str e.g. "refs/pull/509/merge" or "refs/tags/v24.3.12.76.altinitystable"
# Default values
- self.base_ref = "" # type: str
+ self.base_ref = github_event.get("base_ref","") # type: str
self.base_name = "" # type: str
self.head_ref = "" # type: str
self.head_name = "" # type: str
@@ -244,7 +249,7 @@ def __init__(
pull_request = get_pr_for_commit(self.sha, github_event["ref"])
if pull_request is None or pull_request["state"] == "closed":
- # it's merged PR to master
+ # it's merged PR to master, or there is no PR (build against specific commit or tag)
self.number = 0
if pull_request:
self.merged_pr = pull_request["number"]
@@ -253,9 +258,13 @@ def __init__(
self.base_name = self.repo_full_name
self.head_ref = ref
self.head_name = self.repo_full_name
- self.diff_urls.append(
- self.compare_url(github_event["before"], self.sha)
- )
+ before_sha = github_event["before"]
+                    # in case of just a tag on an existing commit, "before_sha" is 0000000000000000000000000000000000000000
+                    # Hence it is a special case: basically nothing changed, so there is no need to compose a diff url
+ if not all(x == '0' for x in before_sha):
+ self.diff_urls.append(
+ self.compare_url(before_sha, self.sha)
+ )
else:
self.number = pull_request["number"]
self.labels = {label["name"] for label in pull_request["labels"]}
diff --git a/tests/ci/release/packaging/ansible/inventory/localhost.yml b/tests/ci/release/packaging/ansible/inventory/localhost.yml
new file mode 100644
index 000000000000..032215decb99
--- /dev/null
+++ b/tests/ci/release/packaging/ansible/inventory/localhost.yml
@@ -0,0 +1,73 @@
+all:
+ hosts:
+ localhost:
+ ansible_connection: local
+ ansible_python_interpreter: "{{ ansible_playbook_python }}"
+ repo_dns_name: "builds.staging.altinity.cloud"
+ aws_region: "us-east-1"
+ repo_subtitle: "Staging Builds"
+ repo_title: "Altinity {{ repo_subtitle }}"
+ repo_name: "altinity-staging"
+ repo_deb_dist: "stable"
+ repo_install_packages: "clickhouse-server clickhouse-client"
+ deb_pkg_names:
+ - "clickhouse-client_{{ pkgver }}_amd64.deb"
+ - "clickhouse-common-static_{{ pkgver }}_amd64.deb"
+ - "clickhouse-common-static-dbg_{{ pkgver }}_amd64.deb"
+ - "clickhouse-server_{{ pkgver }}_amd64.deb"
+ - "clickhouse-client_{{ pkgver }}_arm64.deb"
+ - "clickhouse-common-static_{{ pkgver }}_arm64.deb"
+ - "clickhouse-common-static-dbg_{{ pkgver }}_arm64.deb"
+ - "clickhouse-server_{{ pkgver }}_arm64.deb"
+ rpm_pkg_names:
+ - "clickhouse-client-{{ pkgver }}.x86_64.rpm"
+ - "clickhouse-common-static-{{ pkgver }}.x86_64.rpm"
+ - "clickhouse-common-static-dbg-{{ pkgver }}.x86_64.rpm"
+ - "clickhouse-server-{{ pkgver }}.x86_64.rpm"
+ - "clickhouse-client-{{ pkgver }}.aarch64.rpm"
+ - "clickhouse-common-static-{{ pkgver }}.aarch64.rpm"
+ - "clickhouse-common-static-dbg-{{ pkgver }}.aarch64.rpm"
+ - "clickhouse-server-{{ pkgver }}.aarch64.rpm"
+ tgz_pkg_names:
+ - "clickhouse-client-{{ pkgver }}-amd64.tgz"
+ - "clickhouse-common-static-{{ pkgver }}-amd64.tgz"
+ - "clickhouse-common-static-dbg-{{ pkgver }}-amd64.tgz"
+ - "clickhouse-server-{{ pkgver }}-amd64.tgz"
+ - "clickhouse-client-{{ pkgver }}-arm64.tgz"
+ - "clickhouse-common-static-{{ pkgver }}-arm64.tgz"
+ - "clickhouse-common-static-dbg-{{ pkgver }}-arm64.tgz"
+ - "clickhouse-server-{{ pkgver }}-arm64.tgz"
+ sha512_pkg_names:
+ - "clickhouse-client-{{ pkgver }}-amd64.tgz.sha512"
+ - "clickhouse-common-static-{{ pkgver }}-amd64.tgz.sha512"
+ - "clickhouse-common-static-dbg-{{ pkgver }}-amd64.tgz.sha512"
+ - "clickhouse-server-{{ pkgver }}-amd64.tgz.sha512"
+ - "clickhouse-client-{{ pkgver }}-arm64.tgz.sha512"
+ - "clickhouse-common-static-{{ pkgver }}-arm64.tgz.sha512"
+ - "clickhouse-common-static-dbg-{{ pkgver }}-arm64.tgz.sha512"
+ - "clickhouse-server-{{ pkgver }}-arm64.tgz.sha512"
+ tgz_repos:
+ - "tgz-repo"
+ - "fips-tgz-repo"
+ - "antalya-tgz-repo"
+ - "hotfix-tgz-repo"
+ src_repos:
+ - "src-repo"
+ - "fips-src-repo"
+ - "antalya-src-repo"
+ - "hotfix-src-repo"
+ bin_repos:
+ - "bin-repo"
+ - "fips-bin-repo"
+ - "antalya-bin-repo"
+ - "hotfix-bin-repo"
+ apt_repos:
+ - "apt-repo"
+ - "fips-apt-repo"
+ - "antalya-apt-repo"
+ - "hotfix-apt-repo"
+ yum_repos:
+ - "yum-repo"
+ - "fips-yum-repo"
+ - "antalya-yum-repo"
+ - "hotfix-yum-repo"
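+# NOTE: "pkgver" is not defined in this inventory; it is expected to be passed in at runtime
+# (e.g. "-e pkgver=25.3.6.56.altinitystable") and is substituted into the package names above.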
diff --git a/tests/ci/release/packaging/ansible/roles/get_cloudfront_info/tasks/main.yml b/tests/ci/release/packaging/ansible/roles/get_cloudfront_info/tasks/main.yml
new file mode 100644
index 000000000000..b65d0cdad385
--- /dev/null
+++ b/tests/ci/release/packaging/ansible/roles/get_cloudfront_info/tasks/main.yml
@@ -0,0 +1,34 @@
+- name: Get existing CloudFront information
+ community.aws.cloudfront_info:
+ distribution: yes
+ domain_name_alias: "{{ repo_dns_name }}"
+ register: cloudfront_info
+
+- name: Get current CloudFront origin path
+ set_fact:
+ cloudfront_origin_path: "{{ cloudfront_info.cloudfront[repo_dns_name].Distribution.DistributionConfig.Origins.Items[0].OriginPath }}"
+
+- name: Calculate the source S3 path
+ set_fact:
+ s3_repo_source_path: "s3://{{ repo_dns_name }}{{ cloudfront_origin_path }}"
+
+- name: Decide on the target (blue/green)
+ set_fact:
+ new_cloudfront_origin_path: "{{ '/Green' if cloudfront_origin_path == '/Blue' else '/Blue' }}"
+ when: 'not (push_to_active|default(false))'
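+# Blue/green publishing, as implemented by these roles: unless push_to_active is set, packages are
+# synced to the currently inactive origin path ("/Blue" or "/Green"), and the publish_pkgs role then
+# points the CloudFront distribution at that path.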
+
+- name: Update the active site, when push_to_active is defined
+ set_fact:
+ new_cloudfront_origin_path: "{{ cloudfront_origin_path }}"
+ when: 'push_to_active|default(false)'
+
+- name: Calculate the target S3 path
+ set_fact:
+ s3_repo_target_path: "s3://{{ repo_dns_name }}{{ new_cloudfront_origin_path }}"
+
+- name: Show the source S3 path
+ debug:
+ msg:
+ - "Active path (current live site): {{ s3_repo_source_path }}"
+ - "Target path (to be updated): {{ s3_repo_target_path }}"
+
diff --git a/tests/ci/release/packaging/ansible/roles/publish_pkgs/tasks/main.yml b/tests/ci/release/packaging/ansible/roles/publish_pkgs/tasks/main.yml
new file mode 100644
index 000000000000..627a77d62743
--- /dev/null
+++ b/tests/ci/release/packaging/ansible/roles/publish_pkgs/tasks/main.yml
@@ -0,0 +1,98 @@
+- name: Log the configuration being used
+ debug:
+ msg:
+ - 'Repo DNS Name: {{ repo_dns_name }}'
+ - 'Repo Title: {{ repo_title }}'
+ - 'Repo Subtitle: {{ repo_subtitle }}'
+ - 'Repo Name: {{ repo_name }}'
+ - 'Origin Path: {{ new_cloudfront_origin_path|default("None") }}'
+ - 'Push to Active: {{ push_to_active|default(false) }}'
+
+- name: Create directory index files
+ shell: "{{ ansible_playbook_python }} {{ (playbook_dir + '/../dirindex/dirindexgen.py') | realpath }} {{ repo_dns_name }} {{ new_cloudfront_origin_path }}"
+
+- name: Get static content source path
+ set_fact:
+ static_path: "{{ (playbook_dir + '/../static') | realpath }}"
+
+- name: Get list of static content files
+ find:
+ paths: "{{ static_path }}"
+ file_type: file
+ recurse: true
+ register: static_files
+
+- name: Generate target filenames for source filenames
+ set_fact:
+ target_filenames: >-
+ {{
+ target_filenames | default({}) |
+ combine(
+ {
+ item.path:
+ item.path | regex_replace('^' + (static_path | regex_escape) + '/(.*)$', '\1')
+ }
+ )
+ }}
+ loop: "{{ static_files.files }}"
+ loop_control:
+ label: "{{ item.path }}"
+
+- name: Template HTML files to S3
+ amazon.aws.aws_s3:
+ bucket: "{{ repo_dns_name }}"
+ object: "{{ new_cloudfront_origin_path }}/{{ target_filenames[item.path] }}"
+ content: "{{ lookup('template', item.path) }}"
+ metadata: "Content-Type={{ lookup('pipe', 'file -b --mime-type ' + item.path) }}"
+ mode: put
+ when: 'item.path | basename | splitext | last == ".html"'
+ loop: "{{ static_files.files }}"
+ loop_control:
+ label: "{{ item.path }}"
+
+- name: Copy other static files to S3
+ amazon.aws.aws_s3:
+ bucket: "{{ repo_dns_name }}"
+ object: "{{ new_cloudfront_origin_path }}/{{ target_filenames[item.path] }}"
+ src: "{{ item.path }}"
+ metadata: "Content-Type={{ lookup('pipe', 'file -b --mime-type ' + item.path) }}"
+ mode: put
+ when: 'item.path | basename | splitext | last != ".html"'
+ loop: "{{ static_files.files }}"
+ loop_control:
+ label: "{{ item.path }}"
+
+- name: Update CloudFront distribution
+ community.aws.cloudfront_distribution:
+ region: "{{ aws_region }}"
+ state: present
+ caller_reference: "{{ cloudfront_info.cloudfront[repo_dns_name].Distribution.DistributionConfig.CallerReference }}"
+ origins:
+ - id: "{{ cloudfront_info.cloudfront[repo_dns_name].Distribution.DistributionConfig.Origins.Items[0].Id }}"
+ domain_name: "{{ cloudfront_info.cloudfront[repo_dns_name].Distribution.DistributionConfig.Origins.Items[0].DomainName }}"
+ origin_path: "{{ new_cloudfront_origin_path | default(omit) }}"
+ custom_origin_config:
+ origin_protocol_policy: "http-only"
+ aliases:
+ - "{{ cloudfront_info.cloudfront[repo_dns_name].Distribution.DistributionConfig.Aliases.Items[0] }}"
+ viewer_certificate:
+ acm_certificate_arn: "{{ cloudfront_info.cloudfront[repo_dns_name].Distribution.DistributionConfig.ViewerCertificate.ACMCertificateArn }}"
+ ssl_support_method: "{{ cloudfront_info.cloudfront[repo_dns_name].Distribution.DistributionConfig.ViewerCertificate.SSLSupportMethod }}"
+ logging:
+ bucket: "{{ cloudfront_info.cloudfront[repo_dns_name].Distribution.DistributionConfig.Logging.Bucket }}"
+ enabled: "{{ cloudfront_info.cloudfront[repo_dns_name].Distribution.DistributionConfig.Logging.Enabled }}"
+ include_cookies: "{{ cloudfront_info.cloudfront[repo_dns_name].Distribution.DistributionConfig.Logging.IncludeCookies }}"
+ prefix: "{{ cloudfront_info.cloudfront[repo_dns_name].Distribution.DistributionConfig.Logging.Prefix }}"
+ when: 'not (push_to_active|default(false))'
+ register: distribution
+
+- name: Invalidate CloudFront cache
+ community.aws.cloudfront_invalidation:
+ alias: "{{ repo_dns_name }}"
+ target_paths:
+ - "/*"
+ register: invalidation
+
+- name: Wait for CloudFront invalidation to be complete
+ shell: "aws cloudfront wait invalidation-completed --distribution-id {{ distribution.id }} --id {{ invalidation.invalidation.id }}"
+
diff --git a/tests/ci/release/packaging/ansible/roles/update_bin_repo/tasks/main.yml b/tests/ci/release/packaging/ansible/roles/update_bin_repo/tasks/main.yml
new file mode 100644
index 000000000000..8badef2c3551
--- /dev/null
+++ b/tests/ci/release/packaging/ansible/roles/update_bin_repo/tasks/main.yml
@@ -0,0 +1,52 @@
+- name: Set major version
+ set_fact:
+ major_version: "{{ pkgver.split('.')[0] | int }}"
+
+- name: Copy new binaries
+ shell:
+ cmd: |
+ mv $RUNNER_TEMP/packages/{{ item }}-bin/clickhouse {{ local_repo_path }}/{{ repo_prefix }}bin-repo/{{ item }}/v{{ pkgver }}/self-extracting/clickhouse
+ mv $RUNNER_TEMP/packages/{{ item }}-bin/clickhouse-stripped {{ local_repo_path }}/{{ repo_prefix }}bin-repo/{{ item }}/v{{ pkgver }}/self-extracting/clickhouse-stripped
+ mv $RUNNER_TEMP/packages/{{ item }}-bin/non-self-extracting/clickhouse {{ local_repo_path }}/{{ repo_prefix }}bin-repo/{{ item }}/v{{ pkgver }}/non-self-extracting/clickhouse
+ mv $RUNNER_TEMP/packages/{{ item }}-bin/non-self-extracting/clickhouse-stripped {{ local_repo_path }}/{{ repo_prefix }}bin-repo/{{ item }}/v{{ pkgver }}/non-self-extracting/clickhouse-stripped
+ loop:
+ - amd64
+ - arm64
+ when: major_version >= 24
+
+- name: Generate sha512 checksums for the binaries
+ shell: echo $(sha512sum {{ local_repo_path }}/{{ repo_prefix }}bin-repo/{{ item }} | cut -d ' ' -f 1) >> {{ local_repo_path }}/{{ repo_prefix }}bin-repo/{{ item }}.sha512
+ loop:
+ - amd64/v{{ pkgver }}/self-extracting/clickhouse
+ - amd64/v{{ pkgver }}/self-extracting/clickhouse-stripped
+ - amd64/v{{ pkgver }}/non-self-extracting/clickhouse
+ - amd64/v{{ pkgver }}/non-self-extracting/clickhouse-stripped
+ - arm64/v{{ pkgver }}/self-extracting/clickhouse
+ - arm64/v{{ pkgver }}/self-extracting/clickhouse-stripped
+ - arm64/v{{ pkgver }}/non-self-extracting/clickhouse
+ - arm64/v{{ pkgver }}/non-self-extracting/clickhouse-stripped
+ when: major_version >= 24
+
+- name: Copy new binaries
+ shell:
+ cmd: |
+ mv $RUNNER_TEMP/packages/{{ item }}-bin/clickhouse {{ local_repo_path }}/{{ repo_prefix }}bin-repo/{{ item }}/v{{ pkgver }}/clickhouse
+ mv $RUNNER_TEMP/packages/{{ item }}-bin/clickhouse-stripped {{ local_repo_path }}/{{ repo_prefix }}bin-repo/{{ item }}/v{{ pkgver }}/clickhouse-stripped
+ loop:
+ - amd64
+ - arm64
+ when: major_version < 24
+
+- name: Generate sha512 checksums for the binaries
+ shell: echo $(sha512sum {{ local_repo_path }}/{{ repo_prefix }}bin-repo/{{ item }} | cut -d ' ' -f 1) >> {{ local_repo_path }}/{{ repo_prefix }}bin-repo/{{ item }}.sha512
+ loop:
+ - amd64/v{{ pkgver }}/clickhouse
+ - amd64/v{{ pkgver }}/clickhouse-stripped
+ - arm64/v{{ pkgver }}/clickhouse
+ - arm64/v{{ pkgver }}/clickhouse-stripped
+ when: major_version < 24
+
+- name: Sync bin repos between source and target
+ shell: 'aws s3 sync --delete "{{ local_repo_path }}/{{ item }}" "{{ s3_repo_target_path }}/{{ item }}"'
+ loop: "{{ bin_repos }}"
+ when: cloudfront_origin_path != ""
diff --git a/tests/ci/release/packaging/ansible/roles/update_deb_repo/tasks/main.yml b/tests/ci/release/packaging/ansible/roles/update_deb_repo/tasks/main.yml
new file mode 100644
index 000000000000..1491c8c757be
--- /dev/null
+++ b/tests/ci/release/packaging/ansible/roles/update_deb_repo/tasks/main.yml
@@ -0,0 +1,61 @@
+- name: Copy new debs
+  shell: mv $RUNNER_TEMP/packages/{{ item }} {{ local_repo_path }}/{{ repo_prefix }}apt-repo/pool/main/{{ item }}
+ loop: "{{ deb_pkg_names }}"
+
+- name: Copy library-bridge and odbc-bridge if version newer than 24.8
+ when: (pkgver | regex_replace('^(\\d+\\.\\d+).*$', '\\1')) is version('24.8', '==')
+  shell: mv $RUNNER_TEMP/packages/{{ item }} {{ local_repo_path }}/{{ repo_prefix }}apt-repo/pool/main/{{ item }}
+ loop:
+ - "clickhouse-library-bridge_{{ pkgver }}_amd64.deb"
+ - "clickhouse-library-bridge_{{ pkgver }}_arm64.deb"
+ - "clickhouse-odbc-bridge_{{ pkgver }}_amd64.deb"
+ - "clickhouse-odbc-bridge_{{ pkgver }}_arm64.deb"
+
+- name: Sign new debs
+ shell: "dpkg-sig -k {{ gpg_key_id }} --sign builder {{ local_repo_path }}/{{ repo_prefix }}apt-repo/pool/main/{{ item }}"
+ loop: "{{ deb_pkg_names }}"
+
+- name: Sign library-bridge and odbc-bridge if version newer than 24.8
+ when: (pkgver | regex_replace('^(\\d+\\.\\d+).*$', '\\1')) is version('24.8', '==')
+ shell: "dpkg-sig -k {{ gpg_key_id }} --sign builder {{ local_repo_path }}/{{ repo_prefix }}apt-repo/pool/main/{{ item }}"
+ loop:
+ - "clickhouse-library-bridge_{{ pkgver }}_amd64.deb"
+ - "clickhouse-library-bridge_{{ pkgver }}_arm64.deb"
+ - "clickhouse-odbc-bridge_{{ pkgver }}_amd64.deb"
+ - "clickhouse-odbc-bridge_{{ pkgver }}_arm64.deb"
+
+- name: Set up apt-ftparchive config
+ template:
+ src: "{{ item }}"
+ dest: "/{{ item }}"
+ loop:
+ - "apt-ftparchive.conf"
+ - "apt-ftparchive-{{ repo_deb_dist }}.conf"
+
+- name: Build and sign {{ repo_prefix }}apt repo
+ shell:
+ chdir: "{{ local_repo_path }}/{{ repo_prefix }}apt-repo"
+ cmd: |
+ set -xe
+ apt-ftparchive generate /apt-ftparchive.conf
+ apt-ftparchive -c /apt-ftparchive-{{ repo_deb_dist }}.conf release dists/{{ repo_deb_dist }} > dists/{{ repo_deb_dist }}/Release
+ gpg -a --yes --output dists/{{ repo_deb_dist }}/Release.gpg --detach-sign dists/{{ repo_deb_dist }}/Release
+ gpg -a --yes --clearsign --output dists/{{ repo_deb_dist }}/InRelease --detach-sign dists/{{ repo_deb_dist }}/Release
+
+- name: Get file info for contents of generated repo
+ stat:
+ path: "{{ local_repo_path }}/{{ repo_prefix }}apt-repo/dists/{{ repo_deb_dist }}/Contents-amd64"
+ register: contents_stat
+
+- name: Make sure the generated repo has content
+ assert:
+ that: "contents_stat.stat.size is defined and contents_stat.stat.size > 0"
+ fail_msg: "Repo generated by apt-ftparchive has no contents"
+
+- name: Export GPG public key to repo
+ shell: 'gpg --export --armor "{{ gpg_key_name }}" > "{{ local_repo_path }}/{{ repo_prefix }}apt-repo/pubkey.gpg"'
+
+- name: Sync repos between source and target
+ shell: 'aws s3 sync --delete "{{ local_repo_path }}/{{ item }}" "{{ s3_repo_target_path }}/{{ release_environment }}/{{ item }}"'
+ loop: "{{ apt_repos }}"
+ when: cloudfront_origin_path != ""
diff --git a/tests/ci/release/packaging/ansible/roles/update_deb_repo/templates/apt-ftparchive-stable.conf b/tests/ci/release/packaging/ansible/roles/update_deb_repo/templates/apt-ftparchive-stable.conf
new file mode 100644
index 000000000000..1568a5cd3900
--- /dev/null
+++ b/tests/ci/release/packaging/ansible/roles/update_deb_repo/templates/apt-ftparchive-stable.conf
@@ -0,0 +1,6 @@
+APT::FTPArchive::Release::Codename "stable";
+APT::FTPArchive::Release::Origin "Test repo";
+APT::FTPArchive::Release::Components "main";
+APT::FTPArchive::Release::Label "Test repo packages";
+APT::FTPArchive::Release::Architectures "all amd64 arm64";
+APT::FTPArchive::Release::Suite "stable";
diff --git a/tests/ci/release/packaging/ansible/roles/update_deb_repo/templates/apt-ftparchive.conf b/tests/ci/release/packaging/ansible/roles/update_deb_repo/templates/apt-ftparchive.conf
new file mode 100644
index 000000000000..86b965752d11
--- /dev/null
+++ b/tests/ci/release/packaging/ansible/roles/update_deb_repo/templates/apt-ftparchive.conf
@@ -0,0 +1,17 @@
+Dir {
+ ArchiveDir "{{ local_repo_path }}/{{ repo_prefix }}apt-repo";
+};
+Default {
+ Packages::Compress ". gzip bzip2";
+ Sources::Compress ". gzip bzip2";
+ Contents::Compress ". gzip bzip2";
+};
+TreeDefault {
+ Directory "pool/$(SECTION)";
+ Packages "$(DIST)/$(SECTION)/binary-$(ARCH)/Packages";
+ Contents "$(DIST)/Contents-$(ARCH)";
+};
+Tree "dists/stable" {
+ Sections "main";
+ Architectures "all amd64 arm64";
+}
diff --git a/tests/ci/release/packaging/ansible/roles/update_rpm_repo/tasks/main.yml b/tests/ci/release/packaging/ansible/roles/update_rpm_repo/tasks/main.yml
new file mode 100644
index 000000000000..11f810392096
--- /dev/null
+++ b/tests/ci/release/packaging/ansible/roles/update_rpm_repo/tasks/main.yml
@@ -0,0 +1,51 @@
+- name: Copy new rpms
+  shell: mv $RUNNER_TEMP/packages/{{ item }} {{ local_repo_path }}/{{ repo_prefix }}yum-repo/{{ item }}
+ loop: "{{ rpm_pkg_names }}"
+
+- name: Copy library-bridge and odbc-bridge if version newer than 24.8
+ when: (pkgver | regex_replace('^(\\d+\\.\\d+).*$', '\\1')) is version('24.8', '==')
+  shell: mv $RUNNER_TEMP/packages/{{ item }} {{ local_repo_path }}/{{ repo_prefix }}yum-repo/{{ item }}
+ loop:
+ - "clickhouse-library-bridge-{{ pkgver }}.x86_64.rpm"
+ - "clickhouse-library-bridge-{{ pkgver }}.aarch64.rpm"
+ - "clickhouse-odbc-bridge-{{ pkgver }}.x86_64.rpm"
+ - "clickhouse-odbc-bridge-{{ pkgver }}.aarch64.rpm"
+
+- name: Create rpmmacros file
+ template:
+ src: "rpmmacros.j2"
+ dest: "{{ local_repo_path }}/rpmmacros"
+
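+    # Illustrative result: e.g. "123-25.3.6.56" for PR #123; non-PR runs have pr_info.number == 0.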
+- name: Sign new RPMs
+ shell: 'rpmsign --addsign --load "{{ local_repo_path }}/rpmmacros" "{{ local_repo_path }}/{{ repo_prefix }}yum-repo/{{ item }}"'
+ loop: "{{ rpm_pkg_names }}"
+
+- name: Sign library-bridge and odbc-bridge if version newer than 24.8
+ when: (pkgver | regex_replace('^(\\d+\\.\\d+).*$', '\\1')) is version('24.8', '==')
+ shell: 'rpmsign --addsign --load "{{ local_repo_path }}/rpmmacros" "{{ local_repo_path }}/{{ repo_prefix }}yum-repo/{{ item }}"'
+ loop:
+ - "clickhouse-library-bridge-{{ pkgver }}.x86_64.rpm"
+ - "clickhouse-library-bridge-{{ pkgver }}.aarch64.rpm"
+ - "clickhouse-odbc-bridge-{{ pkgver }}.x86_64.rpm"
+ - "clickhouse-odbc-bridge-{{ pkgver }}.aarch64.rpm"
+
+- name: Build and sign yum repo
+ shell:
+ chdir: "{{ local_repo_path }}/{{ repo_prefix }}yum-repo"
+ cmd: |
+ set -e
+ createrepo --update .
+ gpg --detach-sign --armor --yes repodata/repomd.xml
+
+- name: Export GPG public key to repo
+ shell: 'gpg --export --armor "{{ gpg_key_name }}" > "{{ local_repo_path }}/{{ repo_prefix }}yum-repo/RPM-GPG-KEY-{{ repo_name }}"'
+
+- name: Export .repo file to repo
+ template:
+ src: "repo.j2"
+ dest: "{{ local_repo_path }}/{{ repo_prefix }}yum-repo/{{ repo_name }}.repo"
+
+- name: Sync repos between source and target
+ shell: 'aws s3 sync --delete "{{ local_repo_path }}/{{ item }}" "{{ s3_repo_target_path }}/{{ release_environment }}/{{ item }}"'
+ loop: "{{ yum_repos }}"
+ when: cloudfront_origin_path != ""
diff --git a/tests/ci/release/packaging/ansible/roles/update_rpm_repo/templates/repo.j2 b/tests/ci/release/packaging/ansible/roles/update_rpm_repo/templates/repo.j2
new file mode 100644
index 000000000000..6104a0a62ca2
--- /dev/null
+++ b/tests/ci/release/packaging/ansible/roles/update_rpm_repo/templates/repo.j2
@@ -0,0 +1,7 @@
+[{{ repo_name }}]
+name={{ repo_title }}
+baseurl=https://{{ repo_dns_name }}/{{ repo_prefix }}yum-repo
+enabled=1
+repo_gpgcheck=1
+gpgcheck=1
+gpgkey=https://{{ repo_dns_name }}/{{ repo_prefix }}yum-repo/RPM-GPG-KEY-{{ repo_name }}
diff --git a/tests/ci/release/packaging/ansible/roles/update_rpm_repo/templates/rpmmacros.j2 b/tests/ci/release/packaging/ansible/roles/update_rpm_repo/templates/rpmmacros.j2
new file mode 100644
index 000000000000..3fe4720607e0
--- /dev/null
+++ b/tests/ci/release/packaging/ansible/roles/update_rpm_repo/templates/rpmmacros.j2
@@ -0,0 +1 @@
+%_gpg_name {{ gpg_key_name }}
diff --git a/tests/ci/release/packaging/ansible/roles/update_tar_repo/tasks/main.yml b/tests/ci/release/packaging/ansible/roles/update_tar_repo/tasks/main.yml
new file mode 100644
index 000000000000..745a3df007bc
--- /dev/null
+++ b/tests/ci/release/packaging/ansible/roles/update_tar_repo/tasks/main.yml
@@ -0,0 +1,61 @@
+- name: Copy new tgz
+  shell: mv $RUNNER_TEMP/packages/{{ item }} {{ local_repo_path }}/{{ repo_prefix }}tgz-repo/{{ item }}
+ loop: "{{ tgz_pkg_names }}"
+
+- name: Copy library-bridge and odbc-bridge tgz if version newer than 24.8
+ when: (pkgver | regex_replace('^(\\d+\\.\\d+).*$', '\\1')) is version('24.8', '==')
+  shell: mv $RUNNER_TEMP/packages/{{ item }} {{ local_repo_path }}/{{ repo_prefix }}tgz-repo/{{ item }}
+ loop:
+ - "clickhouse-library-bridge-{{ pkgver }}-amd64.tgz"
+ - "clickhouse-library-bridge-{{ pkgver }}-arm64.tgz"
+ - "clickhouse-odbc-bridge-{{ pkgver }}-amd64.tgz"
+ - "clickhouse-odbc-bridge-{{ pkgver }}-arm64.tgz"
+
+- name: Copy new tgz sha512
+  shell: mv $RUNNER_TEMP/packages/{{ item }} {{ local_repo_path }}/{{ repo_prefix }}tgz-repo/{{ item }}
+ loop: "{{ sha512_pkg_names }}"
+
+- name: Copy library-bridge and odbc-bridge sha512 if version newer than 24.8
+ when: (pkgver | regex_replace('^(\\d+\\.\\d+).*$', '\\1')) is version('24.8', '==')
+  shell: mv $RUNNER_TEMP/packages/{{ item }} {{ local_repo_path }}/{{ repo_prefix }}tgz-repo/{{ item }}
+ loop:
+ - "clickhouse-library-bridge-{{ pkgver }}-amd64.tgz.sha512"
+ - "clickhouse-library-bridge-{{ pkgver }}-arm64.tgz.sha512"
+ - "clickhouse-odbc-bridge-{{ pkgver }}-amd64.tgz.sha512"
+ - "clickhouse-odbc-bridge-{{ pkgver }}-arm64.tgz.sha512"
+
+- name: Verify sha512 from repo
+ shell: cat {{ local_repo_path }}/{{ repo_prefix }}tgz-repo/{{ item }}.sha512 | grep $(sha512sum {{ local_repo_path }}/{{ repo_prefix }}tgz-repo/{{ item }} | cut -d ' ' -f 1) || SHA_VERIFICATION="fail"
+ loop: "{{ tgz_pkg_names }}"
+
+- name: Verify library-bridge and odbc-bridge sha512 from repo
+ when: (pkgver | regex_replace('^(\\d+\\.\\d+).*$', '\\1')) is version('24.8', '==')
+ shell: cat {{ local_repo_path }}/{{ repo_prefix }}tgz-repo/{{ item }}.sha512 | grep $(sha512sum {{ local_repo_path }}/{{ repo_prefix }}tgz-repo/{{ item }} | cut -d ' ' -f 1) || SHA_VERIFICATION="fail"
+ loop:
+ - "clickhouse-library-bridge-{{ pkgver }}-amd64.tgz"
+ - "clickhouse-library-bridge-{{ pkgver }}-arm64.tgz"
+ - "clickhouse-odbc-bridge-{{ pkgver }}-amd64.tgz"
+ - "clickhouse-odbc-bridge-{{ pkgver }}-arm64.tgz"
+
+- name: save verification output
+ shell: echo $SHA_VERIFICATION
+ register: SHA_VERIFICATION
+
+- name: Verify the sha512s match
+ fail:
+ msg: "Failure, sha512 does not match."
+ when: SHA_VERIFICATION == "fail"
+
+- name: Sync tgz repos between source and target
+ shell: 'aws s3 sync --delete "{{ local_repo_path }}/{{ item }}" "{{ s3_repo_target_path }}/{{ item }}"'
+ loop: "{{ tgz_repos }}"
+ when: cloudfront_origin_path != ""
+
+- name: Copy new src
+ shell: mv $RUNNER_TEMP/packages/{{ item }} {{ local_repo_path }}/{{ repo_prefix }}src-repo/{{ item }}
+ loop: "{{ src_pkg_names }}"
+
+- name: Sync src repos between source and target
+ shell: 'aws s3 sync --delete "{{ local_repo_path }}/{{ item }}" "{{ s3_repo_target_path }}/{{ item }}"'
+ loop: "{{ src_repos }}"
+ when: cloudfront_origin_path != ""
diff --git a/tests/ci/release/packaging/ansible/sign-and-release.yml b/tests/ci/release/packaging/ansible/sign-and-release.yml
new file mode 100644
index 000000000000..8720387aac04
--- /dev/null
+++ b/tests/ci/release/packaging/ansible/sign-and-release.yml
@@ -0,0 +1,8 @@
+- hosts: localhost
+ roles:
+ - get_cloudfront_info
+ - update_bin_repo
+ - update_deb_repo
+ - update_rpm_repo
+    - update_tar_repo
+ - publish_pkgs
diff --git a/tests/ci/release/packaging/dirindex/dirindexgen.py b/tests/ci/release/packaging/dirindex/dirindexgen.py
new file mode 100755
index 000000000000..6b4f3332883d
--- /dev/null
+++ b/tests/ci/release/packaging/dirindex/dirindexgen.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python3
+
+import argparse
+import textwrap
+import boto3
+
+
+def folder_list_add(folders, key, value):
+ p = key.split('/')
+ if len(p) > 1:
+ if '' not in folders:
+ folders[''] = [set(), dict()]
+ folders[''][0].add(p[0])
+ for i in range(len(p)-1):
+ base = '/'.join(p[:i+1])
+ if base not in folders:
+ folders[base] = [set(), dict()]
+ if i == len(p)-2:
+ folders[base][1][p[i+1]] = value
+ else:
+ folders[base][0].add(p[i+1])
+
+
+def human_readable_size(size, decimal_places=2):
+ for unit in ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB']:
+ if size < 1024.0 or unit == 'PiB':
+ break
+ size /= 1024.0
+ return f"{size:.{decimal_places}f} {unit}"
+
+
+def folderjoin(items, slashes=False):
+ result = "/".join([item.strip("/") for item in items if item != "" and item != "/"])
+ if slashes:
+ if not result.startswith("/"):
+ result = "/" + result
+ if not result.endswith("/"):
+ result = result + "/"
+ return result
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument(dest='fqdn', help="Name of S3 bucket and domain name")
+ parser.add_argument(dest='prefix', help="S3 prefix to index")
+
+ args = parser.parse_args()
+
+ if args.prefix.startswith("/"):
+ args.prefix = args.prefix[1:]
+
+ if not args.prefix.endswith("/"):
+ args.prefix += "/"
+
+ parent_dir_str = "Parent Directory"
+
+ folders = dict()
+
+ s3 = boto3.client('s3')
+ paginator = s3.get_paginator('list_objects_v2')
+ objs = []
+ for page in paginator.paginate(Bucket=args.fqdn, Prefix=args.prefix):
+ if 'Contents' in page:
+ objs.extend(page['Contents'])
+
+ for o in objs:
+ key = o['Key']
+ if key.startswith(args.prefix):
+ key = key[len(args.prefix):]
+ folder_list_add(folders, key, o)
+
+ for folder in folders:
+ subs, files = folders[folder]
+ folders[folder] = (subs, files, 1 + max(
+ [len(parent_dir_str)] +
+ [len(s)+1 for s in subs] +
+ [len(f) for f in files]
+ ))
+
+ for folder in folders:
+ print(folder)
+ subs, files, maxlen = folders[folder]
+ indexdata = list()
+        indexdata.append(textwrap.dedent(
+            f"""\
+            <!DOCTYPE html>
+            <html>
+            <head>
+            <meta charset="utf-8">
+            <title>Index of {folder}</title>
+            </head>
+            <body>
+            <h1>Index of {folder}</h1>
+            <pre>
+            {"Name":{maxlen}} {"Last Modified":20} {"Size":20}
+            """))
+        if folder != "":
+            parent_folder = folderjoin(folder.split("/")[:-1], slashes=True)
+            indexdata.append(f'<a href="{parent_folder}">{parent_dir_str}</a> ' +
+                             f'{" ":{maxlen-len(parent_dir_str)}} {"-":20} {"-":20}\n')
+        for sub in subs:
+            sub_path = folderjoin([folder, sub], slashes=True)
+            indexdata.append(f'<a href="{sub_path}">{sub}/</a> {" ":{maxlen-len(sub)-1}}' +
+                             f' {"-":20} {"-":20}\n')
+        for file in files:
+            if file != "index.html":
+                file_path = folderjoin([folder, file])
+                mtime = files[file]['LastModified']
+                size = human_readable_size(files[file]['Size'])
+                indexdata.append(f'<a href="/{file_path}">{file}</a> {" ":{maxlen-len(file)}}' +
+                                 f' {mtime:%Y-%m-%d %H:%M:%S} {size:20}\n')
+        indexdata.append("</pre>\n</body>\n</html>\n")
+ s3.put_object(
+ Bucket=args.fqdn,
+ Key=folderjoin([args.prefix, folder, "index.html"]),
+ ContentType="text/html",
+ Body="".join(indexdata)
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tests/ci/release/packaging/static/bootstrap.bundle.min.js b/tests/ci/release/packaging/static/bootstrap.bundle.min.js
new file mode 100644
index 000000000000..b65b161a5332
--- /dev/null
+++ b/tests/ci/release/packaging/static/bootstrap.bundle.min.js
@@ -0,0 +1,7 @@
+/*!
+ * Bootstrap v5.1.0 (https://getbootstrap.com/)
+ * Copyright 2011-2021 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors)
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)
+ */
+!function(t,e){"object"==typeof exports&&"undefined"!=typeof module?module.exports=e():"function"==typeof define&&define.amd?define(e):(t="undefined"!=typeof globalThis?globalThis:t||self).bootstrap=e()}(this,(function(){"use strict";const t=t=>{let e=t.getAttribute("data-bs-target");if(!e||"#"===e){let i=t.getAttribute("href");if(!i||!i.includes("#")&&!i.startsWith("."))return null;i.includes("#")&&!i.startsWith("#")&&(i="#"+i.split("#")[1]),e=i&&"#"!==i?i.trim():null}return e},e=e=>{const i=t(e);return i&&document.querySelector(i)?i:null},i=e=>{const i=t(e);return i?document.querySelector(i):null},n=t=>{t.dispatchEvent(new Event("transitionend"))},s=t=>!(!t||"object"!=typeof t)&&(void 0!==t.jquery&&(t=t[0]),void 0!==t.nodeType),o=t=>s(t)?t.jquery?t[0]:t:"string"==typeof t&&t.length>0?document.querySelector(t):null,r=(t,e,i)=>{Object.keys(i).forEach(n=>{const o=i[n],r=e[n],a=r&&s(r)?"element":null==(l=r)?""+l:{}.toString.call(l).match(/\s([a-z]+)/i)[1].toLowerCase();var l;if(!new RegExp(o).test(a))throw new TypeError(`${t.toUpperCase()}: Option "${n}" provided type "${a}" but expected type "${o}".`)})},a=t=>!(!s(t)||0===t.getClientRects().length)&&"visible"===getComputedStyle(t).getPropertyValue("visibility"),l=t=>!t||t.nodeType!==Node.ELEMENT_NODE||!!t.classList.contains("disabled")||(void 0!==t.disabled?t.disabled:t.hasAttribute("disabled")&&"false"!==t.getAttribute("disabled")),c=t=>{if(!document.documentElement.attachShadow)return null;if("function"==typeof t.getRootNode){const e=t.getRootNode();return e instanceof ShadowRoot?e:null}return t instanceof ShadowRoot?t:t.parentNode?c(t.parentNode):null},h=()=>{},d=t=>{t.offsetHeight},u=()=>{const{jQuery:t}=window;return t&&!document.body.hasAttribute("data-bs-no-jquery")?t:null},f=[],p=()=>"rtl"===document.documentElement.dir,m=t=>{var e;e=()=>{const e=u();if(e){const i=t.NAME,n=e.fn[i];e.fn[i]=t.jQueryInterface,e.fn[i].Constructor=t,e.fn[i].noConflict=()=>(e.fn[i]=n,t.jQueryInterface)}},"loading"===document.readyState?(f.length||document.addEventListener("DOMContentLoaded",()=>{f.forEach(t=>t())}),f.push(e)):e()},g=t=>{"function"==typeof t&&t()},_=(t,e,i=!0)=>{if(!i)return void g(t);const s=(t=>{if(!t)return 0;let{transitionDuration:e,transitionDelay:i}=window.getComputedStyle(t);const n=Number.parseFloat(e),s=Number.parseFloat(i);return n||s?(e=e.split(",")[0],i=i.split(",")[0],1e3*(Number.parseFloat(e)+Number.parseFloat(i))):0})(e)+5;let o=!1;const r=({target:i})=>{i===e&&(o=!0,e.removeEventListener("transitionend",r),g(t))};e.addEventListener("transitionend",r),setTimeout(()=>{o||n(e)},s)},b=(t,e,i,n)=>{let s=t.indexOf(e);if(-1===s)return t[!i&&n?t.length-1:0];const o=t.length;return s+=i?1:-1,n&&(s=(s+o)%o),t[Math.max(0,Math.min(s,o-1))]},v=/[^.]*(?=\..*)\.|.*/,y=/\..*/,w=/::\d+$/,E={};let A=1;const T={mouseenter:"mouseover",mouseleave:"mouseout"},O=/^(mouseenter|mouseleave)/i,C=new Set(["click","dblclick","mouseup","mousedown","contextmenu","mousewheel","DOMMouseScroll","mouseover","mouseout","mousemove","selectstart","selectend","keydown","keypress","keyup","orientationchange","touchstart","touchmove","touchend","touchcancel","pointerdown","pointermove","pointerup","pointerleave","pointercancel","gesturestart","gesturechange","gestureend","focus","blur","change","reset","select","submit","focusin","focusout","load","unload","beforeunload","resize","move","DOMContentLoaded","readystatechange","error","abort","scroll"]);function k(t,e){return e&&`${e}::${A++}`||t.uidEvent||A++}function L(t){const e=k(t);return 
t.uidEvent=e,E[e]=E[e]||{},E[e]}function x(t,e,i=null){const n=Object.keys(t);for(let s=0,o=n.length;sfunction(e){if(!e.relatedTarget||e.relatedTarget!==e.delegateTarget&&!e.delegateTarget.contains(e.relatedTarget))return t.call(this,e)};n?n=t(n):i=t(i)}const[o,r,a]=D(e,i,n),l=L(t),c=l[a]||(l[a]={}),h=x(c,r,o?i:null);if(h)return void(h.oneOff=h.oneOff&&s);const d=k(r,e.replace(v,"")),u=o?function(t,e,i){return function n(s){const o=t.querySelectorAll(e);for(let{target:r}=s;r&&r!==this;r=r.parentNode)for(let a=o.length;a--;)if(o[a]===r)return s.delegateTarget=r,n.oneOff&&P.off(t,s.type,e,i),i.apply(r,[s]);return null}}(t,i,n):function(t,e){return function i(n){return n.delegateTarget=t,i.oneOff&&P.off(t,n.type,e),e.apply(t,[n])}}(t,i);u.delegationSelector=o?i:null,u.originalHandler=r,u.oneOff=s,u.uidEvent=d,c[d]=u,t.addEventListener(a,u,o)}function N(t,e,i,n,s){const o=x(e[i],n,s);o&&(t.removeEventListener(i,o,Boolean(s)),delete e[i][o.uidEvent])}function I(t){return t=t.replace(y,""),T[t]||t}const P={on(t,e,i,n){S(t,e,i,n,!1)},one(t,e,i,n){S(t,e,i,n,!0)},off(t,e,i,n){if("string"!=typeof e||!t)return;const[s,o,r]=D(e,i,n),a=r!==e,l=L(t),c=e.startsWith(".");if(void 0!==o){if(!l||!l[r])return;return void N(t,l,r,o,s?i:null)}c&&Object.keys(l).forEach(i=>{!function(t,e,i,n){const s=e[i]||{};Object.keys(s).forEach(o=>{if(o.includes(n)){const n=s[o];N(t,e,i,n.originalHandler,n.delegationSelector)}})}(t,l,i,e.slice(1))});const h=l[r]||{};Object.keys(h).forEach(i=>{const n=i.replace(w,"");if(!a||e.includes(n)){const e=h[i];N(t,l,r,e.originalHandler,e.delegationSelector)}})},trigger(t,e,i){if("string"!=typeof e||!t)return null;const n=u(),s=I(e),o=e!==s,r=C.has(s);let a,l=!0,c=!0,h=!1,d=null;return o&&n&&(a=n.Event(e,i),n(t).trigger(a),l=!a.isPropagationStopped(),c=!a.isImmediatePropagationStopped(),h=a.isDefaultPrevented()),r?(d=document.createEvent("HTMLEvents"),d.initEvent(s,l,!0)):d=new CustomEvent(e,{bubbles:l,cancelable:!0}),void 0!==i&&Object.keys(i).forEach(t=>{Object.defineProperty(d,t,{get:()=>i[t]})}),h&&d.preventDefault(),c&&t.dispatchEvent(d),d.defaultPrevented&&void 0!==a&&a.preventDefault(),d}},j=new Map;var M={set(t,e,i){j.has(t)||j.set(t,new Map);const n=j.get(t);n.has(e)||0===n.size?n.set(e,i):console.error(`Bootstrap doesn't allow more than one instance per element. 
Bound instance: ${Array.from(n.keys())[0]}.`)},get:(t,e)=>j.has(t)&&j.get(t).get(e)||null,remove(t,e){if(!j.has(t))return;const i=j.get(t);i.delete(e),0===i.size&&j.delete(t)}};class H{constructor(t){(t=o(t))&&(this._element=t,M.set(this._element,this.constructor.DATA_KEY,this))}dispose(){M.remove(this._element,this.constructor.DATA_KEY),P.off(this._element,this.constructor.EVENT_KEY),Object.getOwnPropertyNames(this).forEach(t=>{this[t]=null})}_queueCallback(t,e,i=!0){_(t,e,i)}static getInstance(t){return M.get(o(t),this.DATA_KEY)}static getOrCreateInstance(t,e={}){return this.getInstance(t)||new this(t,"object"==typeof e?e:null)}static get VERSION(){return"5.1.0"}static get NAME(){throw new Error('You have to implement the static method "NAME", for each component!')}static get DATA_KEY(){return"bs."+this.NAME}static get EVENT_KEY(){return"."+this.DATA_KEY}}const B=(t,e="hide")=>{const n="click.dismiss"+t.EVENT_KEY,s=t.NAME;P.on(document,n,`[data-bs-dismiss="${s}"]`,(function(n){if(["A","AREA"].includes(this.tagName)&&n.preventDefault(),l(this))return;const o=i(this)||this.closest("."+s);t.getOrCreateInstance(o)[e]()}))};class R extends H{static get NAME(){return"alert"}close(){if(P.trigger(this._element,"close.bs.alert").defaultPrevented)return;this._element.classList.remove("show");const t=this._element.classList.contains("fade");this._queueCallback(()=>this._destroyElement(),this._element,t)}_destroyElement(){this._element.remove(),P.trigger(this._element,"closed.bs.alert"),this.dispose()}static jQueryInterface(t){return this.each((function(){const e=R.getOrCreateInstance(this);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}B(R,"close"),m(R);class W extends H{static get NAME(){return"button"}toggle(){this._element.setAttribute("aria-pressed",this._element.classList.toggle("active"))}static jQueryInterface(t){return this.each((function(){const e=W.getOrCreateInstance(this);"toggle"===t&&e[t]()}))}}function z(t){return"true"===t||"false"!==t&&(t===Number(t).toString()?Number(t):""===t||"null"===t?null:t)}function q(t){return t.replace(/[A-Z]/g,t=>"-"+t.toLowerCase())}P.on(document,"click.bs.button.data-api",'[data-bs-toggle="button"]',t=>{t.preventDefault();const e=t.target.closest('[data-bs-toggle="button"]');W.getOrCreateInstance(e).toggle()}),m(W);const F={setDataAttribute(t,e,i){t.setAttribute("data-bs-"+q(e),i)},removeDataAttribute(t,e){t.removeAttribute("data-bs-"+q(e))},getDataAttributes(t){if(!t)return{};const e={};return Object.keys(t.dataset).filter(t=>t.startsWith("bs")).forEach(i=>{let n=i.replace(/^bs/,"");n=n.charAt(0).toLowerCase()+n.slice(1,n.length),e[n]=z(t.dataset[i])}),e},getDataAttribute:(t,e)=>z(t.getAttribute("data-bs-"+q(e))),offset(t){const e=t.getBoundingClientRect();return{top:e.top+window.pageYOffset,left:e.left+window.pageXOffset}},position:t=>({top:t.offsetTop,left:t.offsetLeft})},U={find:(t,e=document.documentElement)=>[].concat(...Element.prototype.querySelectorAll.call(e,t)),findOne:(t,e=document.documentElement)=>Element.prototype.querySelector.call(e,t),children:(t,e)=>[].concat(...t.children).filter(t=>t.matches(e)),parents(t,e){const i=[];let n=t.parentNode;for(;n&&n.nodeType===Node.ELEMENT_NODE&&3!==n.nodeType;)n.matches(e)&&i.push(n),n=n.parentNode;return i},prev(t,e){let i=t.previousElementSibling;for(;i;){if(i.matches(e))return[i];i=i.previousElementSibling}return[]},next(t,e){let 
i=t.nextElementSibling;for(;i;){if(i.matches(e))return[i];i=i.nextElementSibling}return[]},focusableChildren(t){const e=["a","button","input","textarea","select","details","[tabindex]",'[contenteditable="true"]'].map(t=>t+':not([tabindex^="-"])').join(", ");return this.find(e,t).filter(t=>!l(t)&&a(t))}},$={interval:5e3,keyboard:!0,slide:!1,pause:"hover",wrap:!0,touch:!0},V={interval:"(number|boolean)",keyboard:"boolean",slide:"(boolean|string)",pause:"(string|boolean)",wrap:"boolean",touch:"boolean"},K="next",X="prev",Y="left",Q="right",G={ArrowLeft:Q,ArrowRight:Y};class Z extends H{constructor(t,e){super(t),this._items=null,this._interval=null,this._activeElement=null,this._isPaused=!1,this._isSliding=!1,this.touchTimeout=null,this.touchStartX=0,this.touchDeltaX=0,this._config=this._getConfig(e),this._indicatorsElement=U.findOne(".carousel-indicators",this._element),this._touchSupported="ontouchstart"in document.documentElement||navigator.maxTouchPoints>0,this._pointerEvent=Boolean(window.PointerEvent),this._addEventListeners()}static get Default(){return $}static get NAME(){return"carousel"}next(){this._slide(K)}nextWhenVisible(){!document.hidden&&a(this._element)&&this.next()}prev(){this._slide(X)}pause(t){t||(this._isPaused=!0),U.findOne(".carousel-item-next, .carousel-item-prev",this._element)&&(n(this._element),this.cycle(!0)),clearInterval(this._interval),this._interval=null}cycle(t){t||(this._isPaused=!1),this._interval&&(clearInterval(this._interval),this._interval=null),this._config&&this._config.interval&&!this._isPaused&&(this._updateInterval(),this._interval=setInterval((document.visibilityState?this.nextWhenVisible:this.next).bind(this),this._config.interval))}to(t){this._activeElement=U.findOne(".active.carousel-item",this._element);const e=this._getItemIndex(this._activeElement);if(t>this._items.length-1||t<0)return;if(this._isSliding)return void P.one(this._element,"slid.bs.carousel",()=>this.to(t));if(e===t)return this.pause(),void this.cycle();const i=t>e?K:X;this._slide(i,this._items[t])}_getConfig(t){return t={...$,...F.getDataAttributes(this._element),..."object"==typeof t?t:{}},r("carousel",t,V),t}_handleSwipe(){const t=Math.abs(this.touchDeltaX);if(t<=40)return;const e=t/this.touchDeltaX;this.touchDeltaX=0,e&&this._slide(e>0?Q:Y)}_addEventListeners(){this._config.keyboard&&P.on(this._element,"keydown.bs.carousel",t=>this._keydown(t)),"hover"===this._config.pause&&(P.on(this._element,"mouseenter.bs.carousel",t=>this.pause(t)),P.on(this._element,"mouseleave.bs.carousel",t=>this.cycle(t))),this._config.touch&&this._touchSupported&&this._addTouchEventListeners()}_addTouchEventListeners(){const t=t=>{!this._pointerEvent||"pen"!==t.pointerType&&"touch"!==t.pointerType?this._pointerEvent||(this.touchStartX=t.touches[0].clientX):this.touchStartX=t.clientX},e=t=>{this.touchDeltaX=t.touches&&t.touches.length>1?0:t.touches[0].clientX-this.touchStartX},i=t=>{!this._pointerEvent||"pen"!==t.pointerType&&"touch"!==t.pointerType||(this.touchDeltaX=t.clientX-this.touchStartX),this._handleSwipe(),"hover"===this._config.pause&&(this.pause(),this.touchTimeout&&clearTimeout(this.touchTimeout),this.touchTimeout=setTimeout(t=>this.cycle(t),500+this._config.interval))};U.find(".carousel-item 
img",this._element).forEach(t=>{P.on(t,"dragstart.bs.carousel",t=>t.preventDefault())}),this._pointerEvent?(P.on(this._element,"pointerdown.bs.carousel",e=>t(e)),P.on(this._element,"pointerup.bs.carousel",t=>i(t)),this._element.classList.add("pointer-event")):(P.on(this._element,"touchstart.bs.carousel",e=>t(e)),P.on(this._element,"touchmove.bs.carousel",t=>e(t)),P.on(this._element,"touchend.bs.carousel",t=>i(t)))}_keydown(t){if(/input|textarea/i.test(t.target.tagName))return;const e=G[t.key];e&&(t.preventDefault(),this._slide(e))}_getItemIndex(t){return this._items=t&&t.parentNode?U.find(".carousel-item",t.parentNode):[],this._items.indexOf(t)}_getItemByOrder(t,e){const i=t===K;return b(this._items,e,i,this._config.wrap)}_triggerSlideEvent(t,e){const i=this._getItemIndex(t),n=this._getItemIndex(U.findOne(".active.carousel-item",this._element));return P.trigger(this._element,"slide.bs.carousel",{relatedTarget:t,direction:e,from:n,to:i})}_setActiveIndicatorElement(t){if(this._indicatorsElement){const e=U.findOne(".active",this._indicatorsElement);e.classList.remove("active"),e.removeAttribute("aria-current");const i=U.find("[data-bs-target]",this._indicatorsElement);for(let e=0;e{P.trigger(this._element,"slid.bs.carousel",{relatedTarget:o,direction:u,from:s,to:r})};if(this._element.classList.contains("slide")){o.classList.add(h),d(o),n.classList.add(c),o.classList.add(c);const t=()=>{o.classList.remove(c,h),o.classList.add("active"),n.classList.remove("active",h,c),this._isSliding=!1,setTimeout(f,0)};this._queueCallback(t,n,!0)}else n.classList.remove("active"),o.classList.add("active"),this._isSliding=!1,f();a&&this.cycle()}_directionToOrder(t){return[Q,Y].includes(t)?p()?t===Y?X:K:t===Y?K:X:t}_orderToDirection(t){return[K,X].includes(t)?p()?t===X?Y:Q:t===X?Q:Y:t}static carouselInterface(t,e){const i=Z.getOrCreateInstance(t,e);let{_config:n}=i;"object"==typeof e&&(n={...n,...e});const s="string"==typeof e?e:n.slide;if("number"==typeof e)i.to(e);else if("string"==typeof s){if(void 0===i[s])throw new TypeError(`No method named "${s}"`);i[s]()}else n.interval&&n.ride&&(i.pause(),i.cycle())}static jQueryInterface(t){return this.each((function(){Z.carouselInterface(this,t)}))}static dataApiClickHandler(t){const e=i(this);if(!e||!e.classList.contains("carousel"))return;const n={...F.getDataAttributes(e),...F.getDataAttributes(this)},s=this.getAttribute("data-bs-slide-to");s&&(n.interval=!1),Z.carouselInterface(e,n),s&&Z.getInstance(e).to(s),t.preventDefault()}}P.on(document,"click.bs.carousel.data-api","[data-bs-slide], [data-bs-slide-to]",Z.dataApiClickHandler),P.on(window,"load.bs.carousel.data-api",()=>{const t=U.find('[data-bs-ride="carousel"]');for(let e=0,i=t.length;et===this._element);null!==s&&o.length&&(this._selector=s,this._triggerArray.push(i))}this._initializeChildren(),this._config.parent||this._addAriaAndCollapsedClass(this._triggerArray,this._isShown()),this._config.toggle&&this.toggle()}static get Default(){return J}static get NAME(){return"collapse"}toggle(){this._isShown()?this.hide():this.show()}show(){if(this._isTransitioning||this._isShown())return;let t,e=[];if(this._config.parent){const t=U.find(".collapse .collapse",this._config.parent);e=U.find(".show, .collapsing",this._config.parent).filter(e=>!t.includes(e))}const i=U.findOne(this._selector);if(e.length){const 
n=e.find(t=>i!==t);if(t=n?et.getInstance(n):null,t&&t._isTransitioning)return}if(P.trigger(this._element,"show.bs.collapse").defaultPrevented)return;e.forEach(e=>{i!==e&&et.getOrCreateInstance(e,{toggle:!1}).hide(),t||M.set(e,"bs.collapse",null)});const n=this._getDimension();this._element.classList.remove("collapse"),this._element.classList.add("collapsing"),this._element.style[n]=0,this._addAriaAndCollapsedClass(this._triggerArray,!0),this._isTransitioning=!0;const s="scroll"+(n[0].toUpperCase()+n.slice(1));this._queueCallback(()=>{this._isTransitioning=!1,this._element.classList.remove("collapsing"),this._element.classList.add("collapse","show"),this._element.style[n]="",P.trigger(this._element,"shown.bs.collapse")},this._element,!0),this._element.style[n]=this._element[s]+"px"}hide(){if(this._isTransitioning||!this._isShown())return;if(P.trigger(this._element,"hide.bs.collapse").defaultPrevented)return;const t=this._getDimension();this._element.style[t]=this._element.getBoundingClientRect()[t]+"px",d(this._element),this._element.classList.add("collapsing"),this._element.classList.remove("collapse","show");const e=this._triggerArray.length;for(let t=0;t{this._isTransitioning=!1,this._element.classList.remove("collapsing"),this._element.classList.add("collapse"),P.trigger(this._element,"hidden.bs.collapse")},this._element,!0)}_isShown(t=this._element){return t.classList.contains("show")}_getConfig(t){return(t={...J,...F.getDataAttributes(this._element),...t}).toggle=Boolean(t.toggle),t.parent=o(t.parent),r("collapse",t,tt),t}_getDimension(){return this._element.classList.contains("collapse-horizontal")?"width":"height"}_initializeChildren(){if(!this._config.parent)return;const t=U.find(".collapse .collapse",this._config.parent);U.find('[data-bs-toggle="collapse"]',this._config.parent).filter(e=>!t.includes(e)).forEach(t=>{const e=i(t);e&&this._addAriaAndCollapsedClass([t],this._isShown(e))})}_addAriaAndCollapsedClass(t,e){t.length&&t.forEach(t=>{e?t.classList.remove("collapsed"):t.classList.add("collapsed"),t.setAttribute("aria-expanded",e)})}static jQueryInterface(t){return this.each((function(){const e={};"string"==typeof t&&/show|hide/.test(t)&&(e.toggle=!1);const i=et.getOrCreateInstance(this,e);if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t]()}}))}}P.on(document,"click.bs.collapse.data-api",'[data-bs-toggle="collapse"]',(function(t){("A"===t.target.tagName||t.delegateTarget&&"A"===t.delegateTarget.tagName)&&t.preventDefault();const i=e(this);U.find(i).forEach(t=>{et.getOrCreateInstance(t,{toggle:!1}).toggle()})})),m(et);var it="top",nt="bottom",st="right",ot="left",rt=[it,nt,st,ot],at=rt.reduce((function(t,e){return t.concat([e+"-start",e+"-end"])}),[]),lt=[].concat(rt,["auto"]).reduce((function(t,e){return t.concat([e,e+"-start",e+"-end"])}),[]),ct=["beforeRead","read","afterRead","beforeMain","main","afterMain","beforeWrite","write","afterWrite"];function ht(t){return t?(t.nodeName||"").toLowerCase():null}function dt(t){if(null==t)return window;if("[object Window]"!==t.toString()){var e=t.ownerDocument;return e&&e.defaultView||window}return t}function ut(t){return t instanceof dt(t).Element||t instanceof Element}function ft(t){return t instanceof dt(t).HTMLElement||t instanceof HTMLElement}function pt(t){return"undefined"!=typeof ShadowRoot&&(t instanceof dt(t).ShadowRoot||t instanceof ShadowRoot)}var mt={name:"applyStyles",enabled:!0,phase:"write",fn:function(t){var e=t.state;Object.keys(e.elements).forEach((function(t){var 
i=e.styles[t]||{},n=e.attributes[t]||{},s=e.elements[t];ft(s)&&ht(s)&&(Object.assign(s.style,i),Object.keys(n).forEach((function(t){var e=n[t];!1===e?s.removeAttribute(t):s.setAttribute(t,!0===e?"":e)})))}))},effect:function(t){var e=t.state,i={popper:{position:e.options.strategy,left:"0",top:"0",margin:"0"},arrow:{position:"absolute"},reference:{}};return Object.assign(e.elements.popper.style,i.popper),e.styles=i,e.elements.arrow&&Object.assign(e.elements.arrow.style,i.arrow),function(){Object.keys(e.elements).forEach((function(t){var n=e.elements[t],s=e.attributes[t]||{},o=Object.keys(e.styles.hasOwnProperty(t)?e.styles[t]:i[t]).reduce((function(t,e){return t[e]="",t}),{});ft(n)&&ht(n)&&(Object.assign(n.style,o),Object.keys(s).forEach((function(t){n.removeAttribute(t)})))}))}},requires:["computeStyles"]};function gt(t){return t.split("-")[0]}var _t=Math.round;function bt(t,e){void 0===e&&(e=!1);var i=t.getBoundingClientRect(),n=1,s=1;return ft(t)&&e&&(n=i.width/t.offsetWidth||1,s=i.height/t.offsetHeight||1),{width:_t(i.width/n),height:_t(i.height/s),top:_t(i.top/s),right:_t(i.right/n),bottom:_t(i.bottom/s),left:_t(i.left/n),x:_t(i.left/n),y:_t(i.top/s)}}function vt(t){var e=bt(t),i=t.offsetWidth,n=t.offsetHeight;return Math.abs(e.width-i)<=1&&(i=e.width),Math.abs(e.height-n)<=1&&(n=e.height),{x:t.offsetLeft,y:t.offsetTop,width:i,height:n}}function yt(t,e){var i=e.getRootNode&&e.getRootNode();if(t.contains(e))return!0;if(i&&pt(i)){var n=e;do{if(n&&t.isSameNode(n))return!0;n=n.parentNode||n.host}while(n)}return!1}function wt(t){return dt(t).getComputedStyle(t)}function Et(t){return["table","td","th"].indexOf(ht(t))>=0}function At(t){return((ut(t)?t.ownerDocument:t.document)||window.document).documentElement}function Tt(t){return"html"===ht(t)?t:t.assignedSlot||t.parentNode||(pt(t)?t.host:null)||At(t)}function Ot(t){return ft(t)&&"fixed"!==wt(t).position?t.offsetParent:null}function Ct(t){for(var e=dt(t),i=Ot(t);i&&Et(i)&&"static"===wt(i).position;)i=Ot(i);return i&&("html"===ht(i)||"body"===ht(i)&&"static"===wt(i).position)?e:i||function(t){var e=-1!==navigator.userAgent.toLowerCase().indexOf("firefox");if(-1!==navigator.userAgent.indexOf("Trident")&&ft(t)&&"fixed"===wt(t).position)return null;for(var i=Tt(t);ft(i)&&["html","body"].indexOf(ht(i))<0;){var n=wt(i);if("none"!==n.transform||"none"!==n.perspective||"paint"===n.contain||-1!==["transform","perspective"].indexOf(n.willChange)||e&&"filter"===n.willChange||e&&n.filter&&"none"!==n.filter)return i;i=i.parentNode}return null}(t)||e}function kt(t){return["top","bottom"].indexOf(t)>=0?"x":"y"}var Lt=Math.max,xt=Math.min,Dt=Math.round;function St(t,e,i){return Lt(t,xt(e,i))}function Nt(t){return Object.assign({},{top:0,right:0,bottom:0,left:0},t)}function It(t,e){return e.reduce((function(e,i){return e[i]=t,e}),{})}var Pt={name:"arrow",enabled:!0,phase:"main",fn:function(t){var e,i=t.state,n=t.name,s=t.options,o=i.elements.arrow,r=i.modifiersData.popperOffsets,a=gt(i.placement),l=kt(a),c=[ot,st].indexOf(a)>=0?"height":"width";if(o&&r){var h=function(t,e){return Nt("number"!=typeof(t="function"==typeof t?t(Object.assign({},e.rects,{placement:e.placement})):t)?t:It(t,rt))}(s.padding,i),d=vt(o),u="y"===l?it:ot,f="y"===l?nt:st,p=i.rects.reference[c]+i.rects.reference[l]-r[l]-i.rects.popper[c],m=r[l]-i.rects.reference[l],g=Ct(o),_=g?"y"===l?g.clientHeight||0:g.clientWidth||0:0,b=p/2-m/2,v=h[u],y=_-d[c]-h[f],w=_/2-d[c]/2+b,E=St(v,w,y),A=l;i.modifiersData[n]=((e={})[A]=E,e.centerOffset=E-w,e)}},effect:function(t){var 
e=t.state,i=t.options.element,n=void 0===i?"[data-popper-arrow]":i;null!=n&&("string"!=typeof n||(n=e.elements.popper.querySelector(n)))&&yt(e.elements.popper,n)&&(e.elements.arrow=n)},requires:["popperOffsets"],requiresIfExists:["preventOverflow"]},jt={top:"auto",right:"auto",bottom:"auto",left:"auto"};function Mt(t){var e,i=t.popper,n=t.popperRect,s=t.placement,o=t.offsets,r=t.position,a=t.gpuAcceleration,l=t.adaptive,c=t.roundOffsets,h=!0===c?function(t){var e=t.x,i=t.y,n=window.devicePixelRatio||1;return{x:Dt(Dt(e*n)/n)||0,y:Dt(Dt(i*n)/n)||0}}(o):"function"==typeof c?c(o):o,d=h.x,u=void 0===d?0:d,f=h.y,p=void 0===f?0:f,m=o.hasOwnProperty("x"),g=o.hasOwnProperty("y"),_=ot,b=it,v=window;if(l){var y=Ct(i),w="clientHeight",E="clientWidth";y===dt(i)&&"static"!==wt(y=At(i)).position&&(w="scrollHeight",E="scrollWidth"),y=y,s===it&&(b=nt,p-=y[w]-n.height,p*=a?1:-1),s===ot&&(_=st,u-=y[E]-n.width,u*=a?1:-1)}var A,T=Object.assign({position:r},l&&jt);return a?Object.assign({},T,((A={})[b]=g?"0":"",A[_]=m?"0":"",A.transform=(v.devicePixelRatio||1)<2?"translate("+u+"px, "+p+"px)":"translate3d("+u+"px, "+p+"px, 0)",A)):Object.assign({},T,((e={})[b]=g?p+"px":"",e[_]=m?u+"px":"",e.transform="",e))}var Ht={name:"computeStyles",enabled:!0,phase:"beforeWrite",fn:function(t){var e=t.state,i=t.options,n=i.gpuAcceleration,s=void 0===n||n,o=i.adaptive,r=void 0===o||o,a=i.roundOffsets,l=void 0===a||a,c={placement:gt(e.placement),popper:e.elements.popper,popperRect:e.rects.popper,gpuAcceleration:s};null!=e.modifiersData.popperOffsets&&(e.styles.popper=Object.assign({},e.styles.popper,Mt(Object.assign({},c,{offsets:e.modifiersData.popperOffsets,position:e.options.strategy,adaptive:r,roundOffsets:l})))),null!=e.modifiersData.arrow&&(e.styles.arrow=Object.assign({},e.styles.arrow,Mt(Object.assign({},c,{offsets:e.modifiersData.arrow,position:"absolute",adaptive:!1,roundOffsets:l})))),e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-placement":e.placement})},data:{}},Bt={passive:!0},Rt={name:"eventListeners",enabled:!0,phase:"write",fn:function(){},effect:function(t){var e=t.state,i=t.instance,n=t.options,s=n.scroll,o=void 0===s||s,r=n.resize,a=void 0===r||r,l=dt(e.elements.popper),c=[].concat(e.scrollParents.reference,e.scrollParents.popper);return o&&c.forEach((function(t){t.addEventListener("scroll",i.update,Bt)})),a&&l.addEventListener("resize",i.update,Bt),function(){o&&c.forEach((function(t){t.removeEventListener("scroll",i.update,Bt)})),a&&l.removeEventListener("resize",i.update,Bt)}},data:{}},Wt={left:"right",right:"left",bottom:"top",top:"bottom"};function zt(t){return t.replace(/left|right|bottom|top/g,(function(t){return Wt[t]}))}var qt={start:"end",end:"start"};function Ft(t){return t.replace(/start|end/g,(function(t){return qt[t]}))}function Ut(t){var e=dt(t);return{scrollLeft:e.pageXOffset,scrollTop:e.pageYOffset}}function $t(t){return bt(At(t)).left+Ut(t).scrollLeft}function Vt(t){var e=wt(t),i=e.overflow,n=e.overflowX,s=e.overflowY;return/auto|scroll|overlay|hidden/.test(i+s+n)}function Kt(t,e){var i;void 0===e&&(e=[]);var n=function t(e){return["html","body","#document"].indexOf(ht(e))>=0?e.ownerDocument.body:ft(e)&&Vt(e)?e:t(Tt(e))}(t),s=n===(null==(i=t.ownerDocument)?void 0:i.body),o=dt(n),r=s?[o].concat(o.visualViewport||[],Vt(n)?n:[]):n,a=e.concat(r);return s?a:a.concat(Kt(Tt(r)))}function Xt(t){return Object.assign({},t,{left:t.x,top:t.y,right:t.x+t.width,bottom:t.y+t.height})}function Yt(t,e){return"viewport"===e?Xt(function(t){var 
e=dt(t),i=At(t),n=e.visualViewport,s=i.clientWidth,o=i.clientHeight,r=0,a=0;return n&&(s=n.width,o=n.height,/^((?!chrome|android).)*safari/i.test(navigator.userAgent)||(r=n.offsetLeft,a=n.offsetTop)),{width:s,height:o,x:r+$t(t),y:a}}(t)):ft(e)?function(t){var e=bt(t);return e.top=e.top+t.clientTop,e.left=e.left+t.clientLeft,e.bottom=e.top+t.clientHeight,e.right=e.left+t.clientWidth,e.width=t.clientWidth,e.height=t.clientHeight,e.x=e.left,e.y=e.top,e}(e):Xt(function(t){var e,i=At(t),n=Ut(t),s=null==(e=t.ownerDocument)?void 0:e.body,o=Lt(i.scrollWidth,i.clientWidth,s?s.scrollWidth:0,s?s.clientWidth:0),r=Lt(i.scrollHeight,i.clientHeight,s?s.scrollHeight:0,s?s.clientHeight:0),a=-n.scrollLeft+$t(t),l=-n.scrollTop;return"rtl"===wt(s||i).direction&&(a+=Lt(i.clientWidth,s?s.clientWidth:0)-o),{width:o,height:r,x:a,y:l}}(At(t)))}function Qt(t){return t.split("-")[1]}function Gt(t){var e,i=t.reference,n=t.element,s=t.placement,o=s?gt(s):null,r=s?Qt(s):null,a=i.x+i.width/2-n.width/2,l=i.y+i.height/2-n.height/2;switch(o){case it:e={x:a,y:i.y-n.height};break;case nt:e={x:a,y:i.y+i.height};break;case st:e={x:i.x+i.width,y:l};break;case ot:e={x:i.x-n.width,y:l};break;default:e={x:i.x,y:i.y}}var c=o?kt(o):null;if(null!=c){var h="y"===c?"height":"width";switch(r){case"start":e[c]=e[c]-(i[h]/2-n[h]/2);break;case"end":e[c]=e[c]+(i[h]/2-n[h]/2)}}return e}function Zt(t,e){void 0===e&&(e={});var i=e,n=i.placement,s=void 0===n?t.placement:n,o=i.boundary,r=void 0===o?"clippingParents":o,a=i.rootBoundary,l=void 0===a?"viewport":a,c=i.elementContext,h=void 0===c?"popper":c,d=i.altBoundary,u=void 0!==d&&d,f=i.padding,p=void 0===f?0:f,m=Nt("number"!=typeof p?p:It(p,rt)),g="popper"===h?"reference":"popper",_=t.elements.reference,b=t.rects.popper,v=t.elements[u?g:h],y=function(t,e,i){var n="clippingParents"===e?function(t){var e=Kt(Tt(t)),i=["absolute","fixed"].indexOf(wt(t).position)>=0&&ft(t)?Ct(t):t;return ut(i)?e.filter((function(t){return ut(t)&&yt(t,i)&&"body"!==ht(t)})):[]}(t):[].concat(e),s=[].concat(n,[i]),o=s[0],r=s.reduce((function(e,i){var n=Yt(t,i);return e.top=Lt(n.top,e.top),e.right=xt(n.right,e.right),e.bottom=xt(n.bottom,e.bottom),e.left=Lt(n.left,e.left),e}),Yt(t,o));return r.width=r.right-r.left,r.height=r.bottom-r.top,r.x=r.left,r.y=r.top,r}(ut(v)?v:v.contextElement||At(t.elements.popper),r,l),w=bt(_),E=Gt({reference:w,element:b,strategy:"absolute",placement:s}),A=Xt(Object.assign({},b,E)),T="popper"===h?A:w,O={top:y.top-T.top+m.top,bottom:T.bottom-y.bottom+m.bottom,left:y.left-T.left+m.left,right:T.right-y.right+m.right},C=t.modifiersData.offset;if("popper"===h&&C){var k=C[s];Object.keys(O).forEach((function(t){var e=[st,nt].indexOf(t)>=0?1:-1,i=[it,nt].indexOf(t)>=0?"y":"x";O[t]+=k[i]*e}))}return O}function Jt(t,e){void 0===e&&(e={});var i=e,n=i.placement,s=i.boundary,o=i.rootBoundary,r=i.padding,a=i.flipVariations,l=i.allowedAutoPlacements,c=void 0===l?lt:l,h=Qt(n),d=h?a?at:at.filter((function(t){return Qt(t)===h})):rt,u=d.filter((function(t){return c.indexOf(t)>=0}));0===u.length&&(u=d);var f=u.reduce((function(e,i){return e[i]=Zt(t,{placement:i,boundary:s,rootBoundary:o,padding:r})[gt(i)],e}),{});return Object.keys(f).sort((function(t,e){return f[t]-f[e]}))}var te={name:"flip",enabled:!0,phase:"main",fn:function(t){var e=t.state,i=t.options,n=t.name;if(!e.modifiersData[n]._skip){for(var s=i.mainAxis,o=void 0===s||s,r=i.altAxis,a=void 0===r||r,l=i.fallbackPlacements,c=i.padding,h=i.boundary,d=i.rootBoundary,u=i.altBoundary,f=i.flipVariations,p=void 
0===f||f,m=i.allowedAutoPlacements,g=e.options.placement,_=gt(g),b=l||(_!==g&&p?function(t){if("auto"===gt(t))return[];var e=zt(t);return[Ft(t),e,Ft(e)]}(g):[zt(g)]),v=[g].concat(b).reduce((function(t,i){return t.concat("auto"===gt(i)?Jt(e,{placement:i,boundary:h,rootBoundary:d,padding:c,flipVariations:p,allowedAutoPlacements:m}):i)}),[]),y=e.rects.reference,w=e.rects.popper,E=new Map,A=!0,T=v[0],O=0;O=0,D=x?"width":"height",S=Zt(e,{placement:C,boundary:h,rootBoundary:d,altBoundary:u,padding:c}),N=x?L?st:ot:L?nt:it;y[D]>w[D]&&(N=zt(N));var I=zt(N),P=[];if(o&&P.push(S[k]<=0),a&&P.push(S[N]<=0,S[I]<=0),P.every((function(t){return t}))){T=C,A=!1;break}E.set(C,P)}if(A)for(var j=function(t){var e=v.find((function(e){var i=E.get(e);if(i)return i.slice(0,t).every((function(t){return t}))}));if(e)return T=e,"break"},M=p?3:1;M>0&&"break"!==j(M);M--);e.placement!==T&&(e.modifiersData[n]._skip=!0,e.placement=T,e.reset=!0)}},requiresIfExists:["offset"],data:{_skip:!1}};function ee(t,e,i){return void 0===i&&(i={x:0,y:0}),{top:t.top-e.height-i.y,right:t.right-e.width+i.x,bottom:t.bottom-e.height+i.y,left:t.left-e.width-i.x}}function ie(t){return[it,st,nt,ot].some((function(e){return t[e]>=0}))}var ne={name:"hide",enabled:!0,phase:"main",requiresIfExists:["preventOverflow"],fn:function(t){var e=t.state,i=t.name,n=e.rects.reference,s=e.rects.popper,o=e.modifiersData.preventOverflow,r=Zt(e,{elementContext:"reference"}),a=Zt(e,{altBoundary:!0}),l=ee(r,n),c=ee(a,s,o),h=ie(l),d=ie(c);e.modifiersData[i]={referenceClippingOffsets:l,popperEscapeOffsets:c,isReferenceHidden:h,hasPopperEscaped:d},e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-reference-hidden":h,"data-popper-escaped":d})}},se={name:"offset",enabled:!0,phase:"main",requires:["popperOffsets"],fn:function(t){var e=t.state,i=t.options,n=t.name,s=i.offset,o=void 0===s?[0,0]:s,r=lt.reduce((function(t,i){return t[i]=function(t,e,i){var n=gt(t),s=[ot,it].indexOf(n)>=0?-1:1,o="function"==typeof i?i(Object.assign({},e,{placement:t})):i,r=o[0],a=o[1];return r=r||0,a=(a||0)*s,[ot,st].indexOf(n)>=0?{x:a,y:r}:{x:r,y:a}}(i,e.rects,o),t}),{}),a=r[e.placement],l=a.x,c=a.y;null!=e.modifiersData.popperOffsets&&(e.modifiersData.popperOffsets.x+=l,e.modifiersData.popperOffsets.y+=c),e.modifiersData[n]=r}},oe={name:"popperOffsets",enabled:!0,phase:"read",fn:function(t){var e=t.state,i=t.name;e.modifiersData[i]=Gt({reference:e.rects.reference,element:e.rects.popper,strategy:"absolute",placement:e.placement})},data:{}},re={name:"preventOverflow",enabled:!0,phase:"main",fn:function(t){var e=t.state,i=t.options,n=t.name,s=i.mainAxis,o=void 0===s||s,r=i.altAxis,a=void 0!==r&&r,l=i.boundary,c=i.rootBoundary,h=i.altBoundary,d=i.padding,u=i.tether,f=void 0===u||u,p=i.tetherOffset,m=void 0===p?0:p,g=Zt(e,{boundary:l,rootBoundary:c,padding:d,altBoundary:h}),_=gt(e.placement),b=Qt(e.placement),v=!b,y=kt(_),w="x"===y?"y":"x",E=e.modifiersData.popperOffsets,A=e.rects.reference,T=e.rects.popper,O="function"==typeof m?m(Object.assign({},e.rects,{placement:e.placement})):m,C={x:0,y:0};if(E){if(o||a){var 
k="y"===y?it:ot,L="y"===y?nt:st,x="y"===y?"height":"width",D=E[y],S=E[y]+g[k],N=E[y]-g[L],I=f?-T[x]/2:0,P="start"===b?A[x]:T[x],j="start"===b?-T[x]:-A[x],M=e.elements.arrow,H=f&&M?vt(M):{width:0,height:0},B=e.modifiersData["arrow#persistent"]?e.modifiersData["arrow#persistent"].padding:{top:0,right:0,bottom:0,left:0},R=B[k],W=B[L],z=St(0,A[x],H[x]),q=v?A[x]/2-I-z-R-O:P-z-R-O,F=v?-A[x]/2+I+z+W+O:j+z+W+O,U=e.elements.arrow&&Ct(e.elements.arrow),$=U?"y"===y?U.clientTop||0:U.clientLeft||0:0,V=e.modifiersData.offset?e.modifiersData.offset[e.placement][y]:0,K=E[y]+q-V-$,X=E[y]+F-V;if(o){var Y=St(f?xt(S,K):S,D,f?Lt(N,X):N);E[y]=Y,C[y]=Y-D}if(a){var Q="x"===y?it:ot,G="x"===y?nt:st,Z=E[w],J=Z+g[Q],tt=Z-g[G],et=St(f?xt(J,K):J,Z,f?Lt(tt,X):tt);E[w]=et,C[w]=et-Z}}e.modifiersData[n]=C}},requiresIfExists:["offset"]};function ae(t,e,i){void 0===i&&(i=!1);var n,s,o=ft(e),r=ft(e)&&function(t){var e=t.getBoundingClientRect(),i=e.width/t.offsetWidth||1,n=e.height/t.offsetHeight||1;return 1!==i||1!==n}(e),a=At(e),l=bt(t,r),c={scrollLeft:0,scrollTop:0},h={x:0,y:0};return(o||!o&&!i)&&(("body"!==ht(e)||Vt(a))&&(c=(n=e)!==dt(n)&&ft(n)?{scrollLeft:(s=n).scrollLeft,scrollTop:s.scrollTop}:Ut(n)),ft(e)?((h=bt(e,!0)).x+=e.clientLeft,h.y+=e.clientTop):a&&(h.x=$t(a))),{x:l.left+c.scrollLeft-h.x,y:l.top+c.scrollTop-h.y,width:l.width,height:l.height}}var le={placement:"bottom",modifiers:[],strategy:"absolute"};function ce(){for(var t=arguments.length,e=new Array(t),i=0;iP.on(t,"mouseover",h)),this._element.focus(),this._element.setAttribute("aria-expanded",!0),this._menu.classList.add("show"),this._element.classList.add("show"),P.trigger(this._element,"shown.bs.dropdown",t)}hide(){if(l(this._element)||!this._isShown(this._menu))return;const t={relatedTarget:this._element};this._completeHide(t)}dispose(){this._popper&&this._popper.destroy(),super.dispose()}update(){this._inNavbar=this._detectNavbar(),this._popper&&this._popper.update()}_completeHide(t){P.trigger(this._element,"hide.bs.dropdown",t).defaultPrevented||("ontouchstart"in document.documentElement&&[].concat(...document.body.children).forEach(t=>P.off(t,"mouseover",h)),this._popper&&this._popper.destroy(),this._menu.classList.remove("show"),this._element.classList.remove("show"),this._element.setAttribute("aria-expanded","false"),F.removeDataAttribute(this._menu,"popper"),P.trigger(this._element,"hidden.bs.dropdown",t))}_getConfig(t){if(t={...this.constructor.Default,...F.getDataAttributes(this._element),...t},r("dropdown",t,this.constructor.DefaultType),"object"==typeof t.reference&&!s(t.reference)&&"function"!=typeof t.reference.getBoundingClientRect)throw new TypeError("dropdown".toUpperCase()+': Option "reference" provided type "object" without a required "getBoundingClientRect" method.');return t}_createPopper(t){if(void 0===pe)throw new TypeError("Bootstrap's dropdowns require Popper (https://popper.js.org)");let e=this._element;"parent"===this._config.reference?e=t:s(this._config.reference)?e=o(this._config.reference):"object"==typeof this._config.reference&&(e=this._config.reference);const i=this._getPopperConfig(),n=i.modifiers.find(t=>"applyStyles"===t.name&&!1===t.enabled);this._popper=fe(e,this._menu,i),n&&F.setDataAttribute(this._menu,"popper","static")}_isShown(t=this._element){return t.classList.contains("show")}_getMenuElement(){return U.next(this._element,".dropdown-menu")[0]}_getPlacement(){const t=this._element.parentNode;if(t.classList.contains("dropend"))return ye;if(t.classList.contains("dropstart"))return we;const 
e="end"===getComputedStyle(this._menu).getPropertyValue("--bs-position").trim();return t.classList.contains("dropup")?e?_e:ge:e?ve:be}_detectNavbar(){return null!==this._element.closest(".navbar")}_getOffset(){const{offset:t}=this._config;return"string"==typeof t?t.split(",").map(t=>Number.parseInt(t,10)):"function"==typeof t?e=>t(e,this._element):t}_getPopperConfig(){const t={placement:this._getPlacement(),modifiers:[{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"offset",options:{offset:this._getOffset()}}]};return"static"===this._config.display&&(t.modifiers=[{name:"applyStyles",enabled:!1}]),{...t,..."function"==typeof this._config.popperConfig?this._config.popperConfig(t):this._config.popperConfig}}_selectMenuItem({key:t,target:e}){const i=U.find(".dropdown-menu .dropdown-item:not(.disabled):not(:disabled)",this._menu).filter(a);i.length&&b(i,e,"ArrowDown"===t,!i.includes(e)).focus()}static jQueryInterface(t){return this.each((function(){const e=Te.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}static clearMenus(t){if(t&&(2===t.button||"keyup"===t.type&&"Tab"!==t.key))return;const e=U.find('[data-bs-toggle="dropdown"]');for(let i=0,n=e.length;ie+t),this._setElementAttributes(".fixed-top, .fixed-bottom, .is-fixed, .sticky-top","paddingRight",e=>e+t),this._setElementAttributes(".sticky-top","marginRight",e=>e-t)}_disableOverFlow(){this._saveInitialAttribute(this._element,"overflow"),this._element.style.overflow="hidden"}_setElementAttributes(t,e,i){const n=this.getWidth();this._applyManipulationCallback(t,t=>{if(t!==this._element&&window.innerWidth>t.clientWidth+n)return;this._saveInitialAttribute(t,e);const s=window.getComputedStyle(t)[e];t.style[e]=i(Number.parseFloat(s))+"px"})}reset(){this._resetElementAttributes(this._element,"overflow"),this._resetElementAttributes(this._element,"paddingRight"),this._resetElementAttributes(".fixed-top, .fixed-bottom, .is-fixed, .sticky-top","paddingRight"),this._resetElementAttributes(".sticky-top","marginRight")}_saveInitialAttribute(t,e){const i=t.style[e];i&&F.setDataAttribute(t,e,i)}_resetElementAttributes(t,e){this._applyManipulationCallback(t,t=>{const i=F.getDataAttribute(t,e);void 0===i?t.style.removeProperty(e):(F.removeDataAttribute(t,e),t.style[e]=i)})}_applyManipulationCallback(t,e){s(t)?e(t):U.find(t,this._element).forEach(e)}isOverflowing(){return this.getWidth()>0}}const Ce={className:"modal-backdrop",isVisible:!0,isAnimated:!1,rootElement:"body",clickCallback:null},ke={className:"string",isVisible:"boolean",isAnimated:"boolean",rootElement:"(element|string)",clickCallback:"(function|null)"};class Le{constructor(t){this._config=this._getConfig(t),this._isAppended=!1,this._element=null}show(t){this._config.isVisible?(this._append(),this._config.isAnimated&&d(this._getElement()),this._getElement().classList.add("show"),this._emulateAnimation(()=>{g(t)})):g(t)}hide(t){this._config.isVisible?(this._getElement().classList.remove("show"),this._emulateAnimation(()=>{this.dispose(),g(t)})):g(t)}_getElement(){if(!this._element){const t=document.createElement("div");t.className=this._config.className,this._config.isAnimated&&t.classList.add("fade"),this._element=t}return this._element}_getConfig(t){return(t={...Ce,..."object"==typeof 
t?t:{}}).rootElement=o(t.rootElement),r("backdrop",t,ke),t}_append(){this._isAppended||(this._config.rootElement.append(this._getElement()),P.on(this._getElement(),"mousedown.bs.backdrop",()=>{g(this._config.clickCallback)}),this._isAppended=!0)}dispose(){this._isAppended&&(P.off(this._element,"mousedown.bs.backdrop"),this._element.remove(),this._isAppended=!1)}_emulateAnimation(t){_(t,this._getElement(),this._config.isAnimated)}}const xe={trapElement:null,autofocus:!0},De={trapElement:"element",autofocus:"boolean"};class Se{constructor(t){this._config=this._getConfig(t),this._isActive=!1,this._lastTabNavDirection=null}activate(){const{trapElement:t,autofocus:e}=this._config;this._isActive||(e&&t.focus(),P.off(document,".bs.focustrap"),P.on(document,"focusin.bs.focustrap",t=>this._handleFocusin(t)),P.on(document,"keydown.tab.bs.focustrap",t=>this._handleKeydown(t)),this._isActive=!0)}deactivate(){this._isActive&&(this._isActive=!1,P.off(document,".bs.focustrap"))}_handleFocusin(t){const{target:e}=t,{trapElement:i}=this._config;if(e===document||e===i||i.contains(e))return;const n=U.focusableChildren(i);0===n.length?i.focus():"backward"===this._lastTabNavDirection?n[n.length-1].focus():n[0].focus()}_handleKeydown(t){"Tab"===t.key&&(this._lastTabNavDirection=t.shiftKey?"backward":"forward")}_getConfig(t){return t={...xe,..."object"==typeof t?t:{}},r("focustrap",t,De),t}}const Ne={backdrop:!0,keyboard:!0,focus:!0},Ie={backdrop:"(boolean|string)",keyboard:"boolean",focus:"boolean"};class Pe extends H{constructor(t,e){super(t),this._config=this._getConfig(e),this._dialog=U.findOne(".modal-dialog",this._element),this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._isShown=!1,this._ignoreBackdropClick=!1,this._isTransitioning=!1,this._scrollBar=new Oe}static get Default(){return Ne}static get NAME(){return"modal"}toggle(t){return this._isShown?this.hide():this.show(t)}show(t){this._isShown||this._isTransitioning||P.trigger(this._element,"show.bs.modal",{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._isAnimated()&&(this._isTransitioning=!0),this._scrollBar.hide(),document.body.classList.add("modal-open"),this._adjustDialog(),this._setEscapeEvent(),this._setResizeEvent(),P.on(this._dialog,"mousedown.dismiss.bs.modal",()=>{P.one(this._element,"mouseup.dismiss.bs.modal",t=>{t.target===this._element&&(this._ignoreBackdropClick=!0)})}),this._showBackdrop(()=>this._showElement(t)))}hide(){if(!this._isShown||this._isTransitioning)return;if(P.trigger(this._element,"hide.bs.modal").defaultPrevented)return;this._isShown=!1;const t=this._isAnimated();t&&(this._isTransitioning=!0),this._setEscapeEvent(),this._setResizeEvent(),this._focustrap.deactivate(),this._element.classList.remove("show"),P.off(this._element,"click.dismiss.bs.modal"),P.off(this._dialog,"mousedown.dismiss.bs.modal"),this._queueCallback(()=>this._hideModal(),this._element,t)}dispose(){[window,this._dialog].forEach(t=>P.off(t,".bs.modal")),this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}handleUpdate(){this._adjustDialog()}_initializeBackDrop(){return new Le({isVisible:Boolean(this._config.backdrop),isAnimated:this._isAnimated()})}_initializeFocusTrap(){return new Se({trapElement:this._element})}_getConfig(t){return t={...Ne,...F.getDataAttributes(this._element),..."object"==typeof t?t:{}},r("modal",t,Ie),t}_showElement(t){const 
e=this._isAnimated(),i=U.findOne(".modal-body",this._dialog);this._element.parentNode&&this._element.parentNode.nodeType===Node.ELEMENT_NODE||document.body.append(this._element),this._element.style.display="block",this._element.removeAttribute("aria-hidden"),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.scrollTop=0,i&&(i.scrollTop=0),e&&d(this._element),this._element.classList.add("show"),this._queueCallback(()=>{this._config.focus&&this._focustrap.activate(),this._isTransitioning=!1,P.trigger(this._element,"shown.bs.modal",{relatedTarget:t})},this._dialog,e)}_setEscapeEvent(){this._isShown?P.on(this._element,"keydown.dismiss.bs.modal",t=>{this._config.keyboard&&"Escape"===t.key?(t.preventDefault(),this.hide()):this._config.keyboard||"Escape"!==t.key||this._triggerBackdropTransition()}):P.off(this._element,"keydown.dismiss.bs.modal")}_setResizeEvent(){this._isShown?P.on(window,"resize.bs.modal",()=>this._adjustDialog()):P.off(window,"resize.bs.modal")}_hideModal(){this._element.style.display="none",this._element.setAttribute("aria-hidden",!0),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._isTransitioning=!1,this._backdrop.hide(()=>{document.body.classList.remove("modal-open"),this._resetAdjustments(),this._scrollBar.reset(),P.trigger(this._element,"hidden.bs.modal")})}_showBackdrop(t){P.on(this._element,"click.dismiss.bs.modal",t=>{this._ignoreBackdropClick?this._ignoreBackdropClick=!1:t.target===t.currentTarget&&(!0===this._config.backdrop?this.hide():"static"===this._config.backdrop&&this._triggerBackdropTransition())}),this._backdrop.show(t)}_isAnimated(){return this._element.classList.contains("fade")}_triggerBackdropTransition(){if(P.trigger(this._element,"hidePrevented.bs.modal").defaultPrevented)return;const{classList:t,scrollHeight:e,style:i}=this._element,n=e>document.documentElement.clientHeight;!n&&"hidden"===i.overflowY||t.contains("modal-static")||(n||(i.overflowY="hidden"),t.add("modal-static"),this._queueCallback(()=>{t.remove("modal-static"),n||this._queueCallback(()=>{i.overflowY=""},this._dialog)},this._dialog),this._element.focus())}_adjustDialog(){const t=this._element.scrollHeight>document.documentElement.clientHeight,e=this._scrollBar.getWidth(),i=e>0;(!i&&t&&!p()||i&&!t&&p())&&(this._element.style.paddingLeft=e+"px"),(i&&!t&&!p()||!i&&t&&p())&&(this._element.style.paddingRight=e+"px")}_resetAdjustments(){this._element.style.paddingLeft="",this._element.style.paddingRight=""}static jQueryInterface(t,e){return this.each((function(){const i=Pe.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t](e)}}))}}P.on(document,"click.bs.modal.data-api",'[data-bs-toggle="modal"]',(function(t){const e=i(this);["A","AREA"].includes(this.tagName)&&t.preventDefault(),P.one(e,"show.bs.modal",t=>{t.defaultPrevented||P.one(e,"hidden.bs.modal",()=>{a(this)&&this.focus()})}),Pe.getOrCreateInstance(e).toggle(this)})),B(Pe),m(Pe);const je={backdrop:!0,keyboard:!0,scroll:!1},Me={backdrop:"boolean",keyboard:"boolean",scroll:"boolean"};class He extends H{constructor(t,e){super(t),this._config=this._getConfig(e),this._isShown=!1,this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._addEventListeners()}static get NAME(){return"offcanvas"}static get Default(){return je}toggle(t){return 
this._isShown?this.hide():this.show(t)}show(t){this._isShown||P.trigger(this._element,"show.bs.offcanvas",{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._element.style.visibility="visible",this._backdrop.show(),this._config.scroll||(new Oe).hide(),this._element.removeAttribute("aria-hidden"),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.classList.add("show"),this._queueCallback(()=>{this._config.scroll||this._focustrap.activate(),P.trigger(this._element,"shown.bs.offcanvas",{relatedTarget:t})},this._element,!0))}hide(){this._isShown&&(P.trigger(this._element,"hide.bs.offcanvas").defaultPrevented||(this._focustrap.deactivate(),this._element.blur(),this._isShown=!1,this._element.classList.remove("show"),this._backdrop.hide(),this._queueCallback(()=>{this._element.setAttribute("aria-hidden",!0),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._element.style.visibility="hidden",this._config.scroll||(new Oe).reset(),P.trigger(this._element,"hidden.bs.offcanvas")},this._element,!0)))}dispose(){this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}_getConfig(t){return t={...je,...F.getDataAttributes(this._element),..."object"==typeof t?t:{}},r("offcanvas",t,Me),t}_initializeBackDrop(){return new Le({className:"offcanvas-backdrop",isVisible:this._config.backdrop,isAnimated:!0,rootElement:this._element.parentNode,clickCallback:()=>this.hide()})}_initializeFocusTrap(){return new Se({trapElement:this._element})}_addEventListeners(){P.on(this._element,"keydown.dismiss.bs.offcanvas",t=>{this._config.keyboard&&"Escape"===t.key&&this.hide()})}static jQueryInterface(t){return this.each((function(){const e=He.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}P.on(document,"click.bs.offcanvas.data-api",'[data-bs-toggle="offcanvas"]',(function(t){const e=i(this);if(["A","AREA"].includes(this.tagName)&&t.preventDefault(),l(this))return;P.one(e,"hidden.bs.offcanvas",()=>{a(this)&&this.focus()});const n=U.findOne(".offcanvas.show");n&&n!==e&&He.getInstance(n).hide(),He.getOrCreateInstance(e).toggle(this)})),P.on(window,"load.bs.offcanvas.data-api",()=>U.find(".offcanvas.show").forEach(t=>He.getOrCreateInstance(t).show())),B(He),m(He);const Be=new Set(["background","cite","href","itemtype","longdesc","poster","src","xlink:href"]),Re=/^(?:(?:https?|mailto|ftp|tel|file):|[^#&/:?]*(?:[#/?]|$))/i,We=/^data:(?:image\/(?:bmp|gif|jpeg|jpg|png|tiff|webp)|video\/(?:mpeg|mp4|ogg|webm)|audio\/(?:mp3|oga|ogg|opus));base64,[\d+/a-z]+=*$/i,ze=(t,e)=>{const i=t.nodeName.toLowerCase();if(e.includes(i))return!Be.has(i)||Boolean(Re.test(t.nodeValue)||We.test(t.nodeValue));const n=e.filter(t=>t instanceof RegExp);for(let t=0,e=n.length;t{ze(t,a)||i.removeAttribute(t.nodeName)})}return n.body.innerHTML}const Fe=new 
Set(["sanitize","allowList","sanitizeFn"]),Ue={animation:"boolean",template:"string",title:"(string|element|function)",trigger:"string",delay:"(number|object)",html:"boolean",selector:"(string|boolean)",placement:"(string|function)",offset:"(array|string|function)",container:"(string|element|boolean)",fallbackPlacements:"array",boundary:"(string|element)",customClass:"(string|function)",sanitize:"boolean",sanitizeFn:"(null|function)",allowList:"object",popperConfig:"(null|object|function)"},$e={AUTO:"auto",TOP:"top",RIGHT:p()?"left":"right",BOTTOM:"bottom",LEFT:p()?"right":"left"},Ve={animation:!0,template:'',trigger:"hover focus",title:"",delay:0,html:!1,selector:!1,placement:"top",offset:[0,0],container:!1,fallbackPlacements:["top","right","bottom","left"],boundary:"clippingParents",customClass:"",sanitize:!0,sanitizeFn:null,allowList:{"*":["class","dir","id","lang","role",/^aria-[\w-]*$/i],a:["target","href","title","rel"],area:[],b:[],br:[],col:[],code:[],div:[],em:[],hr:[],h1:[],h2:[],h3:[],h4:[],h5:[],h6:[],i:[],img:["src","srcset","alt","title","width","height"],li:[],ol:[],p:[],pre:[],s:[],small:[],span:[],sub:[],sup:[],strong:[],u:[],ul:[]},popperConfig:null},Ke={HIDE:"hide.bs.tooltip",HIDDEN:"hidden.bs.tooltip",SHOW:"show.bs.tooltip",SHOWN:"shown.bs.tooltip",INSERTED:"inserted.bs.tooltip",CLICK:"click.bs.tooltip",FOCUSIN:"focusin.bs.tooltip",FOCUSOUT:"focusout.bs.tooltip",MOUSEENTER:"mouseenter.bs.tooltip",MOUSELEAVE:"mouseleave.bs.tooltip"};class Xe extends H{constructor(t,e){if(void 0===pe)throw new TypeError("Bootstrap's tooltips require Popper (https://popper.js.org)");super(t),this._isEnabled=!0,this._timeout=0,this._hoverState="",this._activeTrigger={},this._popper=null,this._config=this._getConfig(e),this.tip=null,this._setListeners()}static get Default(){return Ve}static get NAME(){return"tooltip"}static get Event(){return Ke}static get DefaultType(){return Ue}enable(){this._isEnabled=!0}disable(){this._isEnabled=!1}toggleEnabled(){this._isEnabled=!this._isEnabled}toggle(t){if(this._isEnabled)if(t){const e=this._initializeOnDelegatedTarget(t);e._activeTrigger.click=!e._activeTrigger.click,e._isWithActiveTrigger()?e._enter(null,e):e._leave(null,e)}else{if(this.getTipElement().classList.contains("show"))return void this._leave(null,this);this._enter(null,this)}}dispose(){clearTimeout(this._timeout),P.off(this._element.closest(".modal"),"hide.bs.modal",this._hideModalHandler),this.tip&&this.tip.remove(),this._popper&&this._popper.destroy(),super.dispose()}show(){if("none"===this._element.style.display)throw new Error("Please use show on visible elements");if(!this.isWithContent()||!this._isEnabled)return;const t=P.trigger(this._element,this.constructor.Event.SHOW),e=c(this._element),i=null===e?this._element.ownerDocument.documentElement.contains(this._element):e.contains(this._element);if(t.defaultPrevented||!i)return;const n=this.getTipElement(),s=(t=>{do{t+=Math.floor(1e6*Math.random())}while(document.getElementById(t));return t})(this.constructor.NAME);n.setAttribute("id",s),this._element.setAttribute("aria-describedby",s),this._config.animation&&n.classList.add("fade");const o="function"==typeof 
this._config.placement?this._config.placement.call(this,n,this._element):this._config.placement,r=this._getAttachment(o);this._addAttachmentClass(r);const{container:a}=this._config;M.set(n,this.constructor.DATA_KEY,this),this._element.ownerDocument.documentElement.contains(this.tip)||(a.append(n),P.trigger(this._element,this.constructor.Event.INSERTED)),this._popper?this._popper.update():this._popper=fe(this._element,n,this._getPopperConfig(r)),n.classList.add("show");const l=this._resolvePossibleFunction(this._config.customClass);l&&n.classList.add(...l.split(" ")),"ontouchstart"in document.documentElement&&[].concat(...document.body.children).forEach(t=>{P.on(t,"mouseover",h)});const d=this.tip.classList.contains("fade");this._queueCallback(()=>{const t=this._hoverState;this._hoverState=null,P.trigger(this._element,this.constructor.Event.SHOWN),"out"===t&&this._leave(null,this)},this.tip,d)}hide(){if(!this._popper)return;const t=this.getTipElement();if(P.trigger(this._element,this.constructor.Event.HIDE).defaultPrevented)return;t.classList.remove("show"),"ontouchstart"in document.documentElement&&[].concat(...document.body.children).forEach(t=>P.off(t,"mouseover",h)),this._activeTrigger.click=!1,this._activeTrigger.focus=!1,this._activeTrigger.hover=!1;const e=this.tip.classList.contains("fade");this._queueCallback(()=>{this._isWithActiveTrigger()||("show"!==this._hoverState&&t.remove(),this._cleanTipClass(),this._element.removeAttribute("aria-describedby"),P.trigger(this._element,this.constructor.Event.HIDDEN),this._popper&&(this._popper.destroy(),this._popper=null))},this.tip,e),this._hoverState=""}update(){null!==this._popper&&this._popper.update()}isWithContent(){return Boolean(this.getTitle())}getTipElement(){if(this.tip)return this.tip;const t=document.createElement("div");t.innerHTML=this._config.template;const e=t.children[0];return this.setContent(e),e.classList.remove("fade","show"),this.tip=e,this.tip}setContent(t){this._sanitizeAndSetContent(t,this.getTitle(),".tooltip-inner")}_sanitizeAndSetContent(t,e,i){const n=U.findOne(i,t);e||!n?this.setElementContent(n,e):n.remove()}setElementContent(t,e){if(null!==t)return s(e)?(e=o(e),void(this._config.html?e.parentNode!==t&&(t.innerHTML="",t.append(e)):t.textContent=e.textContent)):void(this._config.html?(this._config.sanitize&&(e=qe(e,this._config.allowList,this._config.sanitizeFn)),t.innerHTML=e):t.textContent=e)}getTitle(){const t=this._element.getAttribute("data-bs-original-title")||this._config.title;return this._resolvePossibleFunction(t)}updateAttachment(t){return"right"===t?"end":"left"===t?"start":t}_initializeOnDelegatedTarget(t,e){return e||this.constructor.getOrCreateInstance(t.delegateTarget,this._getDelegateConfig())}_getOffset(){const{offset:t}=this._config;return"string"==typeof t?t.split(",").map(t=>Number.parseInt(t,10)):"function"==typeof t?e=>t(e,this._element):t}_resolvePossibleFunction(t){return"function"==typeof t?t.call(this._element):t}_getPopperConfig(t){const e={placement:t,modifiers:[{name:"flip",options:{fallbackPlacements:this._config.fallbackPlacements}},{name:"offset",options:{offset:this._getOffset()}},{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"arrow",options:{element:`.${this.constructor.NAME}-arrow`}},{name:"onChange",enabled:!0,phase:"afterWrite",fn:t=>this._handlePopperPlacementChange(t)}],onFirstUpdate:t=>{t.options.placement!==t.placement&&this._handlePopperPlacementChange(t)}};return{...e,..."function"==typeof 
this._config.popperConfig?this._config.popperConfig(e):this._config.popperConfig}}_addAttachmentClass(t){this.getTipElement().classList.add(`${this._getBasicClassPrefix()}-${this.updateAttachment(t)}`)}_getAttachment(t){return $e[t.toUpperCase()]}_setListeners(){this._config.trigger.split(" ").forEach(t=>{if("click"===t)P.on(this._element,this.constructor.Event.CLICK,this._config.selector,t=>this.toggle(t));else if("manual"!==t){const e="hover"===t?this.constructor.Event.MOUSEENTER:this.constructor.Event.FOCUSIN,i="hover"===t?this.constructor.Event.MOUSELEAVE:this.constructor.Event.FOCUSOUT;P.on(this._element,e,this._config.selector,t=>this._enter(t)),P.on(this._element,i,this._config.selector,t=>this._leave(t))}}),this._hideModalHandler=()=>{this._element&&this.hide()},P.on(this._element.closest(".modal"),"hide.bs.modal",this._hideModalHandler),this._config.selector?this._config={...this._config,trigger:"manual",selector:""}:this._fixTitle()}_fixTitle(){const t=this._element.getAttribute("title"),e=typeof this._element.getAttribute("data-bs-original-title");(t||"string"!==e)&&(this._element.setAttribute("data-bs-original-title",t||""),!t||this._element.getAttribute("aria-label")||this._element.textContent||this._element.setAttribute("aria-label",t),this._element.setAttribute("title",""))}_enter(t,e){e=this._initializeOnDelegatedTarget(t,e),t&&(e._activeTrigger["focusin"===t.type?"focus":"hover"]=!0),e.getTipElement().classList.contains("show")||"show"===e._hoverState?e._hoverState="show":(clearTimeout(e._timeout),e._hoverState="show",e._config.delay&&e._config.delay.show?e._timeout=setTimeout(()=>{"show"===e._hoverState&&e.show()},e._config.delay.show):e.show())}_leave(t,e){e=this._initializeOnDelegatedTarget(t,e),t&&(e._activeTrigger["focusout"===t.type?"focus":"hover"]=e._element.contains(t.relatedTarget)),e._isWithActiveTrigger()||(clearTimeout(e._timeout),e._hoverState="out",e._config.delay&&e._config.delay.hide?e._timeout=setTimeout(()=>{"out"===e._hoverState&&e.hide()},e._config.delay.hide):e.hide())}_isWithActiveTrigger(){for(const t in this._activeTrigger)if(this._activeTrigger[t])return!0;return!1}_getConfig(t){const e=F.getDataAttributes(this._element);return Object.keys(e).forEach(t=>{Fe.has(t)&&delete e[t]}),(t={...this.constructor.Default,...e,..."object"==typeof t&&t?t:{}}).container=!1===t.container?document.body:o(t.container),"number"==typeof t.delay&&(t.delay={show:t.delay,hide:t.delay}),"number"==typeof t.title&&(t.title=t.title.toString()),"number"==typeof t.content&&(t.content=t.content.toString()),r("tooltip",t,this.constructor.DefaultType),t.sanitize&&(t.template=qe(t.template,t.allowList,t.sanitizeFn)),t}_getDelegateConfig(){const t={};for(const e in this._config)this.constructor.Default[e]!==this._config[e]&&(t[e]=this._config[e]);return t}_cleanTipClass(){const t=this.getTipElement(),e=new RegExp(`(^|\\s)${this._getBasicClassPrefix()}\\S+`,"g"),i=t.getAttribute("class").match(e);null!==i&&i.length>0&&i.map(t=>t.trim()).forEach(e=>t.classList.remove(e))}_getBasicClassPrefix(){return"bs-tooltip"}_handlePopperPlacementChange(t){const{state:e}=t;e&&(this.tip=e.elements.popper,this._cleanTipClass(),this._addAttachmentClass(this._getAttachment(e.placement)))}static jQueryInterface(t){return this.each((function(){const e=Xe.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}m(Xe);const 
Ye={...Xe.Default,placement:"right",offset:[0,8],trigger:"click",content:"",template:''},Qe={...Xe.DefaultType,content:"(string|element|function)"},Ge={HIDE:"hide.bs.popover",HIDDEN:"hidden.bs.popover",SHOW:"show.bs.popover",SHOWN:"shown.bs.popover",INSERTED:"inserted.bs.popover",CLICK:"click.bs.popover",FOCUSIN:"focusin.bs.popover",FOCUSOUT:"focusout.bs.popover",MOUSEENTER:"mouseenter.bs.popover",MOUSELEAVE:"mouseleave.bs.popover"};class Ze extends Xe{static get Default(){return Ye}static get NAME(){return"popover"}static get Event(){return Ge}static get DefaultType(){return Qe}isWithContent(){return this.getTitle()||this._getContent()}setContent(t){this._sanitizeAndSetContent(t,this.getTitle(),".popover-header"),this._sanitizeAndSetContent(t,this._getContent(),".popover-body")}_getContent(){return this._resolvePossibleFunction(this._config.content)}_getBasicClassPrefix(){return"bs-popover"}static jQueryInterface(t){return this.each((function(){const e=Ze.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}m(Ze);const Je={offset:10,method:"auto",target:""},ti={offset:"number",method:"string",target:"(string|element)"},ei=".nav-link, .list-group-item, .dropdown-item";class ii extends H{constructor(t,e){super(t),this._scrollElement="BODY"===this._element.tagName?window:this._element,this._config=this._getConfig(e),this._offsets=[],this._targets=[],this._activeTarget=null,this._scrollHeight=0,P.on(this._scrollElement,"scroll.bs.scrollspy",()=>this._process()),this.refresh(),this._process()}static get Default(){return Je}static get NAME(){return"scrollspy"}refresh(){const t=this._scrollElement===this._scrollElement.window?"offset":"position",i="auto"===this._config.method?t:this._config.method,n="position"===i?this._getScrollTop():0;this._offsets=[],this._targets=[],this._scrollHeight=this._getScrollHeight(),U.find(ei,this._config.target).map(t=>{const s=e(t),o=s?U.findOne(s):null;if(o){const t=o.getBoundingClientRect();if(t.width||t.height)return[F[i](o).top+n,s]}return null}).filter(t=>t).sort((t,e)=>t[0]-e[0]).forEach(t=>{this._offsets.push(t[0]),this._targets.push(t[1])})}dispose(){P.off(this._scrollElement,".bs.scrollspy"),super.dispose()}_getConfig(t){return(t={...Je,...F.getDataAttributes(this._element),..."object"==typeof t&&t?t:{}}).target=o(t.target)||document.documentElement,r("scrollspy",t,ti),t}_getScrollTop(){return this._scrollElement===window?this._scrollElement.pageYOffset:this._scrollElement.scrollTop}_getScrollHeight(){return this._scrollElement.scrollHeight||Math.max(document.body.scrollHeight,document.documentElement.scrollHeight)}_getOffsetHeight(){return this._scrollElement===window?window.innerHeight:this._scrollElement.getBoundingClientRect().height}_process(){const t=this._getScrollTop()+this._config.offset,e=this._getScrollHeight(),i=this._config.offset+e-this._getOffsetHeight();if(this._scrollHeight!==e&&this.refresh(),t>=i){const t=this._targets[this._targets.length-1];this._activeTarget!==t&&this._activate(t)}else{if(this._activeTarget&&t0)return this._activeTarget=null,void this._clear();for(let e=this._offsets.length;e--;)this._activeTarget!==this._targets[e]&&t>=this._offsets[e]&&(void 0===this._offsets[e+1]||t`${e}[data-bs-target="${t}"],${e}[href="${t}"]`),i=U.findOne(e.join(","),this._config.target);i.classList.add("active"),i.classList.contains("dropdown-item")?U.findOne(".dropdown-toggle",i.closest(".dropdown")).classList.add("active"):U.parents(i,".nav, 
.list-group").forEach(t=>{U.prev(t,".nav-link, .list-group-item").forEach(t=>t.classList.add("active")),U.prev(t,".nav-item").forEach(t=>{U.children(t,".nav-link").forEach(t=>t.classList.add("active"))})}),P.trigger(this._scrollElement,"activate.bs.scrollspy",{relatedTarget:t})}_clear(){U.find(ei,this._config.target).filter(t=>t.classList.contains("active")).forEach(t=>t.classList.remove("active"))}static jQueryInterface(t){return this.each((function(){const e=ii.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}P.on(window,"load.bs.scrollspy.data-api",()=>{U.find('[data-bs-spy="scroll"]').forEach(t=>new ii(t))}),m(ii);class ni extends H{static get NAME(){return"tab"}show(){if(this._element.parentNode&&this._element.parentNode.nodeType===Node.ELEMENT_NODE&&this._element.classList.contains("active"))return;let t;const e=i(this._element),n=this._element.closest(".nav, .list-group");if(n){const e="UL"===n.nodeName||"OL"===n.nodeName?":scope > li > .active":".active";t=U.find(e,n),t=t[t.length-1]}const s=t?P.trigger(t,"hide.bs.tab",{relatedTarget:this._element}):null;if(P.trigger(this._element,"show.bs.tab",{relatedTarget:t}).defaultPrevented||null!==s&&s.defaultPrevented)return;this._activate(this._element,n);const o=()=>{P.trigger(t,"hidden.bs.tab",{relatedTarget:this._element}),P.trigger(this._element,"shown.bs.tab",{relatedTarget:t})};e?this._activate(e,e.parentNode,o):o()}_activate(t,e,i){const n=(!e||"UL"!==e.nodeName&&"OL"!==e.nodeName?U.children(e,".active"):U.find(":scope > li > .active",e))[0],s=i&&n&&n.classList.contains("fade"),o=()=>this._transitionComplete(t,n,i);n&&s?(n.classList.remove("show"),this._queueCallback(o,t,!0)):o()}_transitionComplete(t,e,i){if(e){e.classList.remove("active");const t=U.findOne(":scope > .dropdown-menu .active",e.parentNode);t&&t.classList.remove("active"),"tab"===e.getAttribute("role")&&e.setAttribute("aria-selected",!1)}t.classList.add("active"),"tab"===t.getAttribute("role")&&t.setAttribute("aria-selected",!0),d(t),t.classList.contains("fade")&&t.classList.add("show");let n=t.parentNode;if(n&&"LI"===n.nodeName&&(n=n.parentNode),n&&n.classList.contains("dropdown-menu")){const e=t.closest(".dropdown");e&&U.find(".dropdown-toggle",e).forEach(t=>t.classList.add("active")),t.setAttribute("aria-expanded",!0)}i&&i()}static jQueryInterface(t){return this.each((function(){const e=ni.getOrCreateInstance(this);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}P.on(document,"click.bs.tab.data-api",'[data-bs-toggle="tab"], [data-bs-toggle="pill"], [data-bs-toggle="list"]',(function(t){["A","AREA"].includes(this.tagName)&&t.preventDefault(),l(this)||ni.getOrCreateInstance(this).show()})),m(ni);const si={animation:"boolean",autohide:"boolean",delay:"number"},oi={animation:!0,autohide:!0,delay:5e3};class ri extends H{constructor(t,e){super(t),this._config=this._getConfig(e),this._timeout=null,this._hasMouseInteraction=!1,this._hasKeyboardInteraction=!1,this._setListeners()}static get DefaultType(){return si}static get Default(){return oi}static get 
NAME(){return"toast"}show(){P.trigger(this._element,"show.bs.toast").defaultPrevented||(this._clearTimeout(),this._config.animation&&this._element.classList.add("fade"),this._element.classList.remove("hide"),d(this._element),this._element.classList.add("show"),this._element.classList.add("showing"),this._queueCallback(()=>{this._element.classList.remove("showing"),P.trigger(this._element,"shown.bs.toast"),this._maybeScheduleHide()},this._element,this._config.animation))}hide(){this._element.classList.contains("show")&&(P.trigger(this._element,"hide.bs.toast").defaultPrevented||(this._element.classList.add("showing"),this._queueCallback(()=>{this._element.classList.add("hide"),this._element.classList.remove("showing"),this._element.classList.remove("show"),P.trigger(this._element,"hidden.bs.toast")},this._element,this._config.animation)))}dispose(){this._clearTimeout(),this._element.classList.contains("show")&&this._element.classList.remove("show"),super.dispose()}_getConfig(t){return t={...oi,...F.getDataAttributes(this._element),..."object"==typeof t&&t?t:{}},r("toast",t,this.constructor.DefaultType),t}_maybeScheduleHide(){this._config.autohide&&(this._hasMouseInteraction||this._hasKeyboardInteraction||(this._timeout=setTimeout(()=>{this.hide()},this._config.delay)))}_onInteraction(t,e){switch(t.type){case"mouseover":case"mouseout":this._hasMouseInteraction=e;break;case"focusin":case"focusout":this._hasKeyboardInteraction=e}if(e)return void this._clearTimeout();const i=t.relatedTarget;this._element===i||this._element.contains(i)||this._maybeScheduleHide()}_setListeners(){P.on(this._element,"mouseover.bs.toast",t=>this._onInteraction(t,!0)),P.on(this._element,"mouseout.bs.toast",t=>this._onInteraction(t,!1)),P.on(this._element,"focusin.bs.toast",t=>this._onInteraction(t,!0)),P.on(this._element,"focusout.bs.toast",t=>this._onInteraction(t,!1))}_clearTimeout(){clearTimeout(this._timeout),this._timeout=null}static jQueryInterface(t){return this.each((function(){const e=ri.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}return B(ri),m(ri),{Alert:R,Button:W,Carousel:Z,Collapse:et,Dropdown:Te,Modal:Pe,Offcanvas:He,Popover:Ze,ScrollSpy:ii,Tab:ni,Toast:ri,Tooltip:Xe}}));
+//# sourceMappingURL=bootstrap.bundle.min.js.map
\ No newline at end of file
diff --git a/tests/ci/report.py b/tests/ci/report.py
index ba6f22fed0b2..64cc681f62c6 100644
--- a/tests/ci/report.py
+++ b/tests/ci/report.py
@@ -603,7 +603,7 @@ def load(cls, build_name: str, pr_number: int, head_ref: str): # type: ignore
loads report from a report file matched with given @pr_number and/or a @head_ref
"""
report_path = Path(REPORT_PATH) / BuildResult.get_report_name(
- build_name, pr_number or head_ref
+ build_name, pr_number or CI.Utils.normalize_string(head_ref)
)
return cls.load_from_file(report_path)
@@ -622,12 +622,13 @@ def load_any(cls, build_name: str, pr_number: int, head_ref: str): # type: igno
master_report = None
any_report = None
Path(REPORT_PATH).mkdir(parents=True, exist_ok=True)
+ normalized_head_ref = CI.Utils.normalize_string(head_ref)
for file in Path(REPORT_PATH).iterdir():
if f"{build_name}.json" in file.name:
any_report = file
if "_master_" in file.name:
master_report = file
- elif f"_{head_ref}_" in file.name:
+ elif f"_{normalized_head_ref}_" in file.name:
ref_report = file
elif pr_number and f"_{pr_number}_" in file.name:
pr_report = file
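Note on the normalization above: report file names are expected to embed a normalized form of the branch ref, so a raw head_ref containing characters such as '/' or '.' would never match during the lookup. A minimal sketch of the idea, using a hypothetical normalizer (the real CI.Utils.normalize_string may differ in detail):

import re

def normalize_string(ref: str) -> str:
    # Hypothetical stand-in: lower-case and map anything outside [a-z0-9_] to "_".
    return re.sub(r"[^a-z0-9_]", "_", ref.lower())

# A ref like "customizations/25.3" only matches a report file name once normalized.
assert normalize_string("customizations/25.3") == "customizations_25_3"
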
diff --git a/tests/ci/s3_helper.py b/tests/ci/s3_helper.py
index d0aa034258a5..8f66a01ed201 100644
--- a/tests/ci/s3_helper.py
+++ b/tests/ci/s3_helper.py
@@ -6,6 +6,7 @@
from multiprocessing.dummy import Pool
from pathlib import Path
from typing import Any, List, Union
+import os
import boto3 # type: ignore
import botocore # type: ignore
@@ -20,6 +21,38 @@
S3_URL,
)
+sensitive_var_pattern = re.compile(r"[A-Z_]*(SECRET|PASSWORD|KEY|TOKEN|AZURE)[A-Z_]*")
+sensitive_strings = {
+ var: value for var, value in os.environ.items() if sensitive_var_pattern.match(var)
+}
+
+
+def scan_file_for_sensitive_data(file_content, file_name):
+ """
+ Scan the content of a file for sensitive strings.
+ Raises ValueError if any sensitive values are found.
+ """
+
+ def clean_line(line):
+ for name, value in sensitive_strings.items():
+ line = line.replace(value, f"SECRET[{name}]")
+ return line
+
+ matches = []
+ for line_number, line in enumerate(file_content.splitlines(), start=1):
+ for name, value in sensitive_strings.items():
+ if value in line:
+ matches.append((file_name, line_number, clean_line(line)))
+
+ if not matches:
+ return
+
+ logging.error(f"Sensitive values found in {file_name}")
+ for file_name, line_number, match in matches:
+ logging.error(f"{file_name}:{line_number}: {match}")
+
+ raise ValueError(f"Sensitive values found in {file_name}")
+
def _flatten_list(lst):
result = []
@@ -46,6 +79,14 @@ def __init__(self, client: Any = None, endpoint: str = S3_URL):
def _upload_file_to_s3(
self, bucket_name: str, file_path: Path, s3_path: str
) -> str:
+ logging.debug("Checking %s for sensitive values", file_path)
+ try:
+ file_content = file_path.read_text(encoding="utf-8")
+ except UnicodeDecodeError:
+ logging.warning("Failed to scan file %s, unknown encoding", file_path)
+ else:
+ scan_file_for_sensitive_data(file_content, file_path.name)
+
logging.debug(
"Start uploading %s to bucket=%s path=%s", file_path, bucket_name, s3_path
)
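A minimal standalone sketch of the redaction logic used above when a sensitive value is found (the variable name and value here are hypothetical):

import re

sensitive_var_pattern = re.compile(r"[A-Z_]*(SECRET|PASSWORD|KEY|TOKEN|AZURE)[A-Z_]*")
sensitive_strings = {"AWS_SECRET_ACCESS_KEY": "hunter2"}  # hypothetical leaked value
assert sensitive_var_pattern.match("AWS_SECRET_ACCESS_KEY")

line = "s3 client initialised with key hunter2"
for name, value in sensitive_strings.items():
    line = line.replace(value, f"SECRET[{name}]")
print(line)  # -> s3 client initialised with key SECRET[AWS_SECRET_ACCESS_KEY]

With the check wired into _upload_file_to_s3, a text file containing such a value is rejected with a ValueError before the upload starts, while files that fail UTF-8 decoding are only logged as unscanned.
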
diff --git a/tests/ci/sign_release.py b/tests/ci/sign_release.py
new file mode 100644
index 000000000000..8a5827097c8b
--- /dev/null
+++ b/tests/ci/sign_release.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python3
+import sys
+import os
+import logging
+from env_helper import TEMP_PATH, REPO_COPY, REPORT_PATH
+from s3_helper import S3Helper
+from pr_info import PRInfo
+from build_download_helper import download_builds_filter
+import hashlib
+from pathlib import Path
+
+GPG_BINARY_SIGNING_KEY = os.getenv("GPG_BINARY_SIGNING_KEY")
+GPG_BINARY_SIGNING_PASSPHRASE = os.getenv("GPG_BINARY_SIGNING_PASSPHRASE")
+
+CHECK_NAME = os.getenv("CHECK_NAME", "Sign release")
+
+def hash_file(file_path):
+ BLOCK_SIZE = 65536 # The size of each read from the file
+
+ file_hash = hashlib.sha256() # Create the hash object, can use something other than `.sha256()` if you wish
+    with open(file_path, 'rb') as f: # Open the file to read its bytes
+ fb = f.read(BLOCK_SIZE) # Read from the file. Take in the amount declared above
+ while len(fb) > 0: # While there is still data being read from the file
+ file_hash.update(fb) # Update the hash
+ fb = f.read(BLOCK_SIZE) # Read the next block from the file
+
+ hash_file_path = file_path + '.sha256'
+ with open(hash_file_path, 'x') as f:
+ digest = file_hash.hexdigest()
+ f.write(digest)
+ print(f'Hashed {file_path}: {digest}')
+
+ return hash_file_path
+
+def sign_file(file_path):
+ priv_key_file_path = 'priv.key'
+ with open(priv_key_file_path, 'x') as f:
+ f.write(GPG_BINARY_SIGNING_KEY)
+
+ out_file_path = f'{file_path}.gpg'
+
+ os.system(f'echo {GPG_BINARY_SIGNING_PASSPHRASE} | gpg --batch --import {priv_key_file_path}')
+ os.system(f'gpg -o {out_file_path} --pinentry-mode=loopback --batch --yes --passphrase {GPG_BINARY_SIGNING_PASSPHRASE} --sign {file_path}')
+ print(f"Signed {file_path}")
+ os.remove(priv_key_file_path)
+
+ return out_file_path
+
+def main():
+ reports_path = Path(REPORT_PATH)
+
+ if not os.path.exists(TEMP_PATH):
+ os.makedirs(TEMP_PATH)
+
+ pr_info = PRInfo()
+
+ logging.info("Repo copy path %s", REPO_COPY)
+
+ s3_helper = S3Helper()
+
+ s3_path_prefix = Path(f"{pr_info.number}/{pr_info.sha}/" + CHECK_NAME.lower().replace(
+ " ", "_"
+ ).replace("(", "_").replace(")", "_").replace(",", "_"))
+
+    # download the `package_release` artifacts generated by the build jobs
+ download_builds_filter(CHECK_NAME, reports_path, Path(TEMP_PATH))
+
+ for f in os.listdir(TEMP_PATH):
+ full_path = os.path.join(TEMP_PATH, f)
+ if os.path.isdir(full_path):
+ continue
+ hashed_file_path = hash_file(full_path)
+ signed_file_path = sign_file(hashed_file_path)
+ s3_path = s3_path_prefix / os.path.basename(signed_file_path)
+ s3_helper.upload_build_file_to_s3(Path(signed_file_path), str(s3_path))
+ print(f'Uploaded file {signed_file_path} to {s3_path}')
+
+ # Signed hashes are:
+ # clickhouse-client_22.3.15.2.altinitystable_amd64.deb.sha512.gpg clickhouse-keeper_22.3.15.2.altinitystable_x86_64.apk.sha512.gpg
+ # clickhouse-client-22.3.15.2.altinitystable-amd64.tgz.sha512.gpg clickhouse-keeper-22.3.15.2.altinitystable.x86_64.rpm.sha512.gpg
+ # clickhouse-client_22.3.15.2.altinitystable_x86_64.apk.sha512.gpg clickhouse-keeper-dbg_22.3.15.2.altinitystable_amd64.deb.sha512.gpg
+ # clickhouse-client-22.3.15.2.altinitystable.x86_64.rpm.sha512.gpg clickhouse-keeper-dbg-22.3.15.2.altinitystable-amd64.tgz.sha512.gpg
+ # clickhouse-common-static_22.3.15.2.altinitystable_amd64.deb.sha512.gpg clickhouse-keeper-dbg_22.3.15.2.altinitystable_x86_64.apk.sha512.gpg
+ # clickhouse-common-static-22.3.15.2.altinitystable-amd64.tgz.sha512.gpg clickhouse-keeper-dbg-22.3.15.2.altinitystable.x86_64.rpm.sha512.gpg
+ # clickhouse-common-static_22.3.15.2.altinitystable_x86_64.apk.sha512.gpg clickhouse-keeper.sha512.gpg
+ # clickhouse-common-static-22.3.15.2.altinitystable.x86_64.rpm.sha512.gpg clickhouse-library-bridge.sha512.gpg
+ # clickhouse-common-static-dbg_22.3.15.2.altinitystable_amd64.deb.sha512.gpg clickhouse-odbc-bridge.sha512.gpg
+ # clickhouse-common-static-dbg-22.3.15.2.altinitystable-amd64.tgz.sha512.gpg clickhouse-server_22.3.15.2.altinitystable_amd64.deb.sha512.gpg
+ # clickhouse-common-static-dbg_22.3.15.2.altinitystable_x86_64.apk.sha512.gpg clickhouse-server-22.3.15.2.altinitystable-amd64.tgz.sha512.gpg
+ # clickhouse-common-static-dbg-22.3.15.2.altinitystable.x86_64.rpm.sha512.gpg clickhouse-server_22.3.15.2.altinitystable_x86_64.apk.sha512.gpg
+ # clickhouse-keeper_22.3.15.2.altinitystable_amd64.deb.sha512.gpg clickhouse-server-22.3.15.2.altinitystable.x86_64.rpm.sha512.gpg
+ # clickhouse-keeper-22.3.15.2.altinitystable-amd64.tgz.sha512.gpg clickhouse.sha512.gpg
+
+ sys.exit(0)
+
+if __name__ == "__main__":
+ main()
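For consumers of the uploaded artifacts, a minimal verification sketch (file names are hypothetical): recompute the SHA-256 exactly as hash_file does and compare it against the published .sha256 file; the .gpg wrapper itself can additionally be checked with gpg --verify.

import hashlib
from pathlib import Path

def verify_sha256(artifact: Path, digest_file: Path, block_size: int = 65536) -> bool:
    # Stream the artifact in blocks, mirroring hash_file, and compare hex digests.
    file_hash = hashlib.sha256()
    with open(artifact, "rb") as f:
        for block in iter(lambda: f.read(block_size), b""):
            file_hash.update(block)
    return file_hash.hexdigest() == digest_file.read_text().strip()

# verify_sha256(Path("clickhouse-server_24.3.1.1.altinitystable_amd64.deb"),
#               Path("clickhouse-server_24.3.1.1.altinitystable_amd64.deb.sha256"))
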
diff --git a/tests/ci/sqlancer_check.py b/tests/ci/sqlancer_check.py
index 42389d0a341f..1be47d252bfa 100644
--- a/tests/ci/sqlancer_check.py
+++ b/tests/ci/sqlancer_check.py
@@ -14,7 +14,7 @@
from stopwatch import Stopwatch
from tee_popen import TeePopen
-IMAGE_NAME = "clickhouse/sqlancer-test"
+IMAGE_NAME = "altinityinfra/sqlancer-test"
def get_run_command(download_url: str, workspace_path: Path, image: DockerImage) -> str:
diff --git a/tests/ci/sqllogic_test.py b/tests/ci/sqllogic_test.py
index 4464302a1229..7befab12cd21 100755
--- a/tests/ci/sqllogic_test.py
+++ b/tests/ci/sqllogic_test.py
@@ -26,7 +26,7 @@
from tee_popen import TeePopen
NO_CHANGES_MSG = "Nothing to run"
-IMAGE_NAME = "clickhouse/sqllogic-test"
+IMAGE_NAME = "altinityinfra/sqllogic-test"
def get_run_command(
diff --git a/tests/ci/sqltest.py b/tests/ci/sqltest.py
index b3e4233f6546..92026db4d7b1 100644
--- a/tests/ci/sqltest.py
+++ b/tests/ci/sqltest.py
@@ -14,7 +14,7 @@
from report import SUCCESS, JobReport, TestResult
from stopwatch import Stopwatch
-IMAGE_NAME = "clickhouse/sqltest"
+IMAGE_NAME = "altinityinfra/sqltest"
def get_run_command(pr_number, sha, download_url, workspace_path, image):
diff --git a/tests/ci/stress_check.py b/tests/ci/stress_check.py
index 4b92797fde35..1870c9dfac2c 100644
--- a/tests/ci/stress_check.py
+++ b/tests/ci/stress_check.py
@@ -24,9 +24,17 @@
class SensitiveFormatter(logging.Formatter):
@staticmethod
def _filter(s):
- return re.sub(
- r"(.*)(AZURE_CONNECTION_STRING.*\')(.*)", r"\1AZURE_CONNECTION_STRING\3", s
+ s = re.sub(r"(.*)(AZURE_STORAGE_KEY\S*\')(.*)", r"\1AZURE_STORAGE_KEY\3", s)
+ s = re.sub(r"(.*)(AZURE_ACCOUNT_NAME\S*\')(.*)", r"\1AZURE_ACCOUNT_NAME\3", s)
+ s = re.sub(
+ r"(.*)(AZURE_CONTAINER_NAME\S*\')(.*)", r"\1AZURE_CONTAINER_NAME\3", s
)
+ s = re.sub(
+ r"(.*)(AZURE_STORAGE_ACCOUNT_URL\S*\')(.*)",
+ r"\1AZURE_STORAGE_ACCOUNT_URL\3",
+ s,
+ )
+ return s
def format(self, record):
original = logging.Formatter.format(self, record)
@@ -35,14 +43,20 @@ def format(self, record):
def get_additional_envs(check_name: str) -> List[str]:
result = []
- azure_connection_string = get_parameter_from_ssm("azure_connection_string")
- result.append(f"AZURE_CONNECTION_STRING='{azure_connection_string}'")
- # some cloud-specific features require feature flags enabled
- # so we need this ENV to be able to disable the randomization
- # of feature flags
+ # Get Azure credentials from environment variables
+ azure_account_name = os.environ.get("AZURE_ACCOUNT_NAME")
+
+ if azure_account_name:
+ result.append(f"AZURE_ACCOUNT_NAME='{azure_account_name}'")
+ result.append(f"AZURE_STORAGE_KEY='{os.environ['AZURE_STORAGE_KEY']}'")
+ result.append(f"AZURE_CONTAINER_NAME='{os.environ['AZURE_CONTAINER_NAME']}'")
+ result.append(
+ f"AZURE_STORAGE_ACCOUNT_URL='{os.environ['AZURE_STORAGE_ACCOUNT_URL']}'"
+ )
+
result.append("RANDOMIZE_KEEPER_FEATURE_FLAGS=1")
- if "azure" in check_name:
- result.append("USE_AZURE_STORAGE_FOR_MERGE_TREE=1")
+ # if "azure" in check_name:
+ # result.append("USE_AZURE_STORAGE_FOR_MERGE_TREE=1")
if "s3" in check_name:
result.append("USE_S3_STORAGE_FOR_MERGE_TREE=1")
@@ -163,10 +177,10 @@ def run_stress_test(upgrade_check: bool = False) -> None:
# this is praktika based CI
print("Copy input *.deb artifacts")
assert Shell.check(f"cp {REPO_COPY}/ci/tmp/*.deb {packages_path}", verbose=True)
- docker_image = pull_image(get_docker_image("clickhouse/stateful-test"))
+ docker_image = pull_image(get_docker_image("altinityinfra/stateful-test"))
else:
download_all_deb_packages(check_name, reports_path, packages_path)
- docker_image = pull_image(get_docker_image("clickhouse/stress-test"))
+ docker_image = pull_image(get_docker_image("altinityinfra/stress-test"))
server_log_path = temp_path / "server_log"
server_log_path.mkdir(parents=True, exist_ok=True)
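A quick sketch of what the widened SensitiveFormatter filter above redacts; the same pattern is applied for each of the four Azure variables (the sample value is hypothetical):

import re

def _filter(s: str) -> str:
    # Replace "AZURE_STORAGE_KEY=...<closing quote>" with just the variable name.
    return re.sub(r"(.*)(AZURE_STORAGE_KEY\S*\')(.*)", r"\1AZURE_STORAGE_KEY\3", s)

print(_filter("export AZURE_STORAGE_KEY='abc123' UPLOAD=1"))
# -> export AZURE_STORAGE_KEY UPLOAD=1
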
diff --git a/tests/ci/test_ci_config.py b/tests/ci/test_ci_config.py
index 81f903212704..dc1efdec132b 100644
--- a/tests/ci/test_ci_config.py
+++ b/tests/ci/test_ci_config.py
@@ -148,7 +148,6 @@ def test_builds_configs(self):
CI.JobNames.STYLE_CHECK,
CI.JobNames.FAST_TEST,
CI.JobNames.BUILD_CHECK,
- CI.JobNames.DOCS_CHECK,
CI.JobNames.BUGFIX_VALIDATE,
):
expected_builds = []
diff --git a/tests/ci/test_ci_options.py b/tests/ci/test_ci_options.py
index a3ff64e5c5ca..b78f56153db6 100644
--- a/tests/ci/test_ci_options.py
+++ b/tests/ci/test_ci_options.py
@@ -126,7 +126,6 @@
"ClickBench (aarch64)",
"libFuzzer tests",
"Builds",
- "Docs check",
"Bugfix validation",
]
diff --git a/tests/ci/test_git.py b/tests/ci/test_git.py
index 60cd95b6869d..833582e01bd7 100644
--- a/tests/ci/test_git.py
+++ b/tests/ci/test_git.py
@@ -56,16 +56,16 @@ def setUp(self):
self.git.commits_since_new = 0
def test_tags(self):
- self.git.new_tag = "v21.12.333.22222-stable"
- self.git.latest_tag = "v21.12.333.22222-stable"
+ self.git.new_tag = "v21.12.333.22222-altinitystable"
+ self.git.latest_tag = "v21.12.333.22222-altinitystable"
for tag_attr in ("new_tag", "latest_tag"):
- self.assertEqual(getattr(self.git, tag_attr), "v21.12.333.22222-stable")
+ self.assertEqual(getattr(self.git, tag_attr), "v21.12.333.22222-altinitystable")
setattr(self.git, tag_attr, "")
self.assertEqual(getattr(self.git, tag_attr), "")
for tag in (
- "v21.12.333-stable",
+ "v21.12.333-altinitystable",
"v21.12.333-prestable",
- "21.12.333.22222-stable",
+ "21.12.333.22222-altinitystable",
"v21.12.333.22222-production",
):
with self.assertRaises(Exception):
diff --git a/tests/ci/test_version.py b/tests/ci/test_version.py
index c4f12091ec09..4591799f76e7 100644
--- a/tests/ci/test_version.py
+++ b/tests/ci/test_version.py
@@ -18,7 +18,7 @@ def test_version_arg(self):
("1.1.1.2", vh.get_version_from_string("1.1.1.2")),
("v11.1.1.2-lts", vh.get_version_from_string("11.1.1.2")),
("v01.1.1.2-prestable", vh.get_version_from_string("1.1.1.2")),
- ("v21.1.1.2-stable", vh.get_version_from_string("21.1.1.2")),
+ ("v21.1.1.2-altinitystable", vh.get_version_from_string("21.1.1.2")),
("v31.1.1.2-testing", vh.get_version_from_string("31.1.1.2")),
("refs/tags/v31.1.1.2-testing", vh.get_version_from_string("31.1.1.2")),
)
@@ -48,44 +48,52 @@ class TestCase:
expected: CHV
cases = (
+ # TestCase(
+ # "v24.6.1.1-new",
+ # 15,
+ # "v24.4.1.2088-stable",
+ # 415,
+ # CHV(24, 5, 1, 54487, None, 415),
+ # ),
+ # TestCase(
+ # "v24.6.1.1-testing",
+ # 15,
+ # "v24.4.1.2088-stable",
+ # 415,
+ # CHV(24, 5, 1, 54487, None, 15),
+ # ),
+ # TestCase(
+ # "v24.6.1.1-stable",
+ # 15,
+ # "v24.4.1.2088-stable",
+ # 415,
+ # CHV(24, 5, 1, 54487, None, 15),
+ # ),
+ # TestCase(
+ # "v24.5.1.1-stable",
+ # 15,
+ # "v24.4.1.2088-stable",
+ # 415,
+ # CHV(24, 5, 1, 54487, None, 15),
+ # ),
TestCase(
- "v24.6.1.1-new",
- 15,
+ "v24.5.1.100-stable",
+ 0,
"v24.4.1.2088-stable",
415,
- CHV(24, 5, 1, 54487, None, 415),
- ),
- TestCase(
- "v24.6.1.1-testing",
- 15,
- "v24.4.1.2088-stable",
- 415,
- CHV(24, 5, 1, 54487, None, 16),
- ),
- TestCase(
- "v24.6.1.1-stable",
- 15,
- "v24.4.1.2088-stable",
- 415,
- CHV(24, 5, 1, 54487, None, 15),
- ),
- TestCase(
- "v24.5.1.1-stable",
- 15,
- "v24.4.1.2088-stable",
- 415,
- CHV(24, 5, 1, 54487, None, 15),
+ CHV(24, 5, 1, 54487, None, 100),
),
)
git = Git(True)
- for tc in cases:
- git.latest_tag = tc.latest_tag
- git.commits_since_latest = tc.commits_since_latest
- git.new_tag = tc.new_tag
- git.commits_since_new = tc.commits_since_new
- self.assertEqual(
- vh.get_version_from_repo(
- Path("tests/ci/tests/autogenerated_versions.txt"), git
- ),
- tc.expected,
- )
+ for i, tc in enumerate(cases):
+ with self.subTest(tc, i=i):
+ git.latest_tag = tc.latest_tag
+ git.commits_since_latest = tc.commits_since_latest
+ git.new_tag = tc.new_tag
+ git.commits_since_new = tc.commits_since_new
+ self.assertEqual(
+ vh.get_version_from_repo(
+ Path("tests/ci/tests/autogenerated_versions.txt"), git
+ ),
+ tc.expected,
+ )
diff --git a/tests/ci/tests/autogenerated_versions.txt b/tests/ci/tests/autogenerated_versions.txt
index 10028bf50c81..675fc161a2fc 100644
--- a/tests/ci/tests/autogenerated_versions.txt
+++ b/tests/ci/tests/autogenerated_versions.txt
@@ -6,6 +6,7 @@ SET(VERSION_REVISION 54487)
SET(VERSION_MAJOR 24)
SET(VERSION_MINOR 5)
SET(VERSION_PATCH 1)
+SET(VERSION_TWEAK 15)
SET(VERSION_GITHASH 70a1d3a63d47f0be077d67b8deb907230fc7cfb0)
SET(VERSION_DESCRIBE v24.5.1.1-testing)
SET(VERSION_STRING 24.5.1.1)
diff --git a/tests/ci/tests/docker_images_for_tests.json b/tests/ci/tests/docker_images_for_tests.json
index eb22fd8fa1e1..1ab4af5c156b 100644
--- a/tests/ci/tests/docker_images_for_tests.json
+++ b/tests/ci/tests/docker_images_for_tests.json
@@ -1,120 +1,116 @@
{
"docker/packager/deb": {
- "name": "clickhouse/deb-builder",
+ "name": "altinityinfra/deb-builder",
"dependent": []
},
"docker/packager/binary": {
- "name": "clickhouse/binary-builder",
+ "name": "altinityinfra/binary-builder",
"dependent": [
"docker/test/codebrowser"
]
},
"docker/test/compatibility/centos": {
- "name": "clickhouse/test-old-centos",
+ "name": "altinityinfra/test-old-centos",
"dependent": []
},
"docker/test/compatibility/ubuntu": {
- "name": "clickhouse/test-old-ubuntu",
+ "name": "altinityinfra/test-old-ubuntu",
"dependent": []
},
"docker/test/integration/base": {
- "name": "clickhouse/integration-test",
+ "name": "altinityinfra/integration-test",
"dependent": []
},
"docker/test/fuzzer": {
- "name": "clickhouse/fuzzer",
+ "name": "altinityinfra/fuzzer",
"dependent": []
},
"docker/test/performance-comparison": {
- "name": "clickhouse/performance-comparison",
+ "name": "altinityinfra/performance-comparison",
"dependent": []
},
"docker/test/util": {
- "name": "clickhouse/test-util",
+ "name": "altinityinfra/test-util",
"dependent": [
"docker/test/base",
"docker/test/fasttest"
]
},
"docker/test/stateless": {
- "name": "clickhouse/stateless-test",
+ "name": "altinityinfra/stateless-test",
"dependent": [
"docker/test/stateful",
"docker/test/unit"
]
},
"docker/test/stateful": {
- "name": "clickhouse/stateful-test",
+ "name": "altinityinfra/stateful-test",
"dependent": [
"docker/test/stress"
]
},
"docker/test/unit": {
- "name": "clickhouse/unit-test",
+ "name": "altinityinfra/unit-test",
"dependent": []
},
"docker/test/stress": {
- "name": "clickhouse/stress-test",
+ "name": "altinityinfra/stress-test",
"dependent": []
},
"docker/test/codebrowser": {
- "name": "clickhouse/codebrowser",
+ "name": "altinityinfra/codebrowser",
"dependent": []
},
"docker/test/integration/runner": {
- "name": "clickhouse/integration-tests-runner",
+ "name": "altinityinfra/integration-tests-runner",
"dependent": []
},
"docker/test/fasttest": {
- "name": "clickhouse/fasttest",
- "dependent": []
- },
- "docker/test/style": {
- "name": "clickhouse/style-test",
+ "name": "altinityinfra/fasttest",
"dependent": []
},
"docker/test/integration/s3_proxy": {
- "name": "clickhouse/s3-proxy",
+ "name": "altinityinfra/s3-proxy",
"dependent": []
},
"docker/test/integration/resolver": {
- "name": "clickhouse/python-bottle",
+ "name": "altinityinfra/python-bottle",
"dependent": []
},
"docker/test/integration/helper_container": {
- "name": "clickhouse/integration-helper",
+ "name": "altinityinfra/integration-helper",
"dependent": []
},
"docker/test/integration/mysql_golang_client": {
- "name": "clickhouse/mysql-golang-client",
+ "name": "altinityinfra/mysql-golang-client",
"dependent": []
},
"docker/test/integration/dotnet_client": {
- "name": "clickhouse/dotnet-client",
+ "name": "altinityinfra/dotnet-client",
"dependent": []
},
"docker/test/integration/mysql_java_client": {
- "name": "clickhouse/mysql-java-client",
+ "name": "altinityinfra/mysql-java-client",
"dependent": []
},
"docker/test/integration/mysql_js_client": {
- "name": "clickhouse/mysql-js-client",
+ "name": "altinityinfra/mysql-js-client",
"dependent": []
},
"docker/test/integration/mysql_php_client": {
- "name": "clickhouse/mysql-php-client",
+ "name": "altinityinfra/mysql-php-client",
"dependent": []
},
"docker/test/integration/postgresql_java_client": {
- "name": "clickhouse/postgresql-java-client",
+ "name": "altinityinfra/postgresql-java-client",
"dependent": []
},
"docker/test/integration/kerberos_kdc": {
- "name": "clickhouse/kerberos-kdc",
+ "name": "altinityinfra/kerberos-kdc",
"dependent": []
},
"docker/test/base": {
- "name": "clickhouse/test-base",
+ "name": "altinityinfra/test-base",
"dependent": [
"docker/test/stateless",
"docker/test/integration/base",
@@ -124,35 +120,27 @@
]
},
"docker/test/sqlancer": {
- "name": "clickhouse/sqlancer-test",
+ "name": "altinityinfra/sqlancer-test",
"dependent": []
},
"docker/test/keeper-jepsen": {
- "name": "clickhouse/keeper-jepsen-test",
+ "name": "altinityinfra/keeper-jepsen-test",
"dependent": []
},
- "docker/docs/builder": {
- "name": "clickhouse/docs-builder",
- "only_amd64": true,
- "dependent": [
- "docker/docs/check",
- "docker/docs/release"
- ]
- },
"docker/docs/check": {
- "name": "clickhouse/docs-check",
+ "name": "altinityinfra/docs-check",
"dependent": []
},
"docker/docs/release": {
- "name": "clickhouse/docs-release",
+ "name": "altinityinfra/docs-release",
"dependent": []
},
"docker/test/sqllogic": {
- "name": "clickhouse/sqllogic-test",
+ "name": "altinityinfra/sqllogic-test",
"dependent": []
},
"docker/test/sqltest": {
- "name": "clickhouse/sqltest",
+ "name": "altinityinfra/sqltest",
"dependent": []
}
}
diff --git a/tests/ci/unit_tests_check.py b/tests/ci/unit_tests_check.py
index 9cc8ec379bfe..7aabe009bc99 100644
--- a/tests/ci/unit_tests_check.py
+++ b/tests/ci/unit_tests_check.py
@@ -15,7 +15,7 @@
from stopwatch import Stopwatch
from tee_popen import TeePopen
-IMAGE_NAME = "clickhouse/unit-test"
+IMAGE_NAME = "altinityinfra/unit-test"
def get_test_name(line):
diff --git a/tests/ci/version_helper.py b/tests/ci/version_helper.py
index c767bf5a1d89..e5a749a86c7d 100755
--- a/tests/ci/version_helper.py
+++ b/tests/ci/version_helper.py
@@ -4,7 +4,9 @@
from pathlib import Path
from typing import Any, Dict, Iterable, List, Literal, Optional, Set, Tuple, Union
-from git_helper import TWEAK, Git, get_tags, git_runner, removeprefix
+from pr_info import PRInfo # grype scan needs to know the PR number
+
+from git_helper import TWEAK, Git, get_tags, git_runner, removeprefix, VersionType
FILE_WITH_VERSION_PATH = "cmake/autogenerated_versions.txt"
CHANGELOG_IN_PATH = "debian/changelog.in"
@@ -29,6 +31,8 @@
SET(VERSION_MINOR {minor})
SET(VERSION_PATCH {patch})
SET(VERSION_GITHASH {githash})
+SET(VERSION_TWEAK {tweak})
+SET(VERSION_FLAVOUR {flavour})
SET(VERSION_DESCRIBE {describe})
SET(VERSION_STRING {string})
# end of autochange
@@ -48,6 +52,7 @@ def __init__(
revision: Union[int, str],
git: Optional[Git],
tweak: Optional[Union[int, str]] = None,
+ flavour: Optional[str] = None,
):
self._major = int(major)
self._minor = int(minor)
@@ -61,6 +66,7 @@ def __init__(
self._tweak = self._git.tweak
self._describe = ""
self._description = ""
+ self._flavour = flavour
def update(self, part: PART_TYPE) -> "ClickHouseVersion":
"""If part is valid, returns a new version"""
@@ -86,16 +92,6 @@ def bump(self) -> "ClickHouseVersion":
self._tweak = 1
return self
- def bump_patch(self) -> "ClickHouseVersion":
- self._revision += 1
- self._patch += 1
- self._tweak = 1
- return self
-
- def reset_tweak(self) -> "ClickHouseVersion":
- self._tweak = 1
- return self
-
def major_update(self) -> "ClickHouseVersion":
if self._git is not None:
self._git.update()
@@ -115,6 +111,13 @@ def patch_update(self) -> "ClickHouseVersion":
self.major, self.minor, self.patch + 1, self.revision, self._git
)
+ def reset_tweak(self) -> "ClickHouseVersion":
+ if self._git is not None:
+ self._git.update()
+ return ClickHouseVersion(
+ self.major, self.minor, self.patch, self.revision, self._git, 1
+ )
+
@property
def major(self) -> int:
return self._major
@@ -154,22 +157,24 @@ def describe(self):
def description(self) -> str:
return self._description
+ @property
+ def flavour(self) -> str:
+ return self._flavour
+
@property
def string(self):
- return ".".join(
+ version_as_string = ".".join(
(str(self.major), str(self.minor), str(self.patch), str(self.tweak))
)
+ if self._flavour:
+ version_as_string = f"{version_as_string}.{self._flavour}"
+ return version_as_string
@property
def is_lts(self) -> bool:
"""our X.3 and X.8 are LTS"""
return self.minor % 5 == 3
- @property
- def is_supported(self) -> bool:
- "we can support only versions with VersionType STABLE or LTS"
- return self.description in (VersionType.STABLE, VersionType.LTS)
-
def get_stable_release_type(self) -> str:
if self.is_lts:
return VersionType.LTS
@@ -185,6 +190,7 @@ def as_dict(self) -> VERSIONS:
"githash": self.githash,
"describe": self.describe,
"string": self.string,
+ "flavour": self.flavour
}
def as_tuple(self) -> Tuple[int, int, int, int]:
@@ -194,7 +200,10 @@ def with_description(self, version_type):
if version_type not in VersionType.VALID:
raise ValueError(f"version type {version_type} not in {VersionType.VALID}")
self._description = version_type
- self._describe = f"v{self.string}-{version_type}"
+ if version_type == self._flavour:
+ self._describe = f"v{self.string}"
+ else:
+ self._describe = f"v{self.string}-{version_type}"
return self
def copy(self) -> "ClickHouseVersion":
@@ -228,7 +237,7 @@ def __lt__(self, other: Any) -> bool:
for part in ("major", "minor", "patch", "tweak"):
if getattr(self, part) < getattr(other, part):
return True
- if getattr(self, part) > getattr(other, part):
+ elif getattr(self, part) > getattr(other, part):
return False
return False
@@ -251,21 +260,12 @@ def __repr__(self):
ClickHouseVersions = List[ClickHouseVersion]
-
-class VersionType:
- LTS = "lts"
- NEW = "new"
- PRESTABLE = "prestable"
- STABLE = "stable"
- TESTING = "testing"
- VALID = (NEW, TESTING, PRESTABLE, STABLE, LTS)
-
-
def validate_version(version: str) -> None:
+    # NOTE(vnemkov): minor but important fixes, so versions with a 'flavour' part are treated as valid (e.g. 22.8.8.4.altinitystable)
parts = version.split(".")
- if len(parts) != 4:
+ if len(parts) < 4:
raise ValueError(f"{version} does not contain 4 parts")
- for part in parts:
+ for part in parts[:4]:
int(part)
@@ -306,18 +306,36 @@ def get_version_from_repo(
versions["patch"],
versions["revision"],
git,
+ # Explicitly use tweak value from version file
+ tweak=versions.get("tweak", versions["revision"]),
+ flavour=versions.get("flavour", None)
)
- # Since 24.5 we have tags like v24.6.1.1-new, and we must check if the release
- # branch already has it's own commit. It's necessary for a proper tweak version
+
+ # if this commit is tagged, use tag's version instead of something stored in cmake
if git is not None and git.latest_tag:
version_from_tag = get_version_from_tag(git.latest_tag)
- if (
- version_from_tag.description == VersionType.NEW
- and cmake_version < version_from_tag
- ):
- # We are in a new release branch without existing release.
- # We should change the tweak version to a `tweak_to_new`
- cmake_version.tweak = git.tweak_to_new
+ logging.debug(f'Git latest tag: {git.latest_tag} ({git.commits_since_latest} commits ago)\n'
+ f'"new" tag: {git.new_tag} ({git.commits_since_new})\n'
+ f'current commit: {git.sha}\n'
+                  f'current branch: {git.branch}'
+ )
+ if git.latest_tag and git.commits_since_latest == 0:
+ # Tag has a priority over the version written in CMake.
+ # Version must match (except tweak, flavour, description, etc.) to avoid accidental mess.
+ if not (version_from_tag.major == cmake_version.major \
+ and version_from_tag.minor == cmake_version.minor \
+ and version_from_tag.patch == cmake_version.patch):
+ raise RuntimeError(f"Version generated from tag ({version_from_tag}) should have same major, minor, and patch values as version generated from cmake ({cmake_version})")
+
+            # No need to reset the version completely: the revision part is not set in the tag and must be preserved
+ logging.debug(f"Resetting TWEAK and FLAVOUR of version from cmake {cmake_version} to values from tag: {version_from_tag.tweak}.{version_from_tag._flavour}")
+ cmake_version._flavour = version_from_tag._flavour
+ cmake_version.tweak = version_from_tag.tweak
+ else:
+ # We've had some number of commits since the latest (upstream) tag.
+ logging.debug(f"Bumping the TWEAK of version from cmake {cmake_version} by {git.commits_since_upstream}")
+ cmake_version.tweak = cmake_version.tweak + git.commits_since_upstream
+
return cmake_version
@@ -325,15 +343,31 @@ def get_version_from_string(
version: str, git: Optional[Git] = None
) -> ClickHouseVersion:
validate_version(version)
- parts = version.split(".")
- return ClickHouseVersion(parts[0], parts[1], parts[2], -1, git, parts[3])
+ # dict for simple handling of missing parts with parts.get(index, default)
+ parts = dict(enumerate(version.split(".")))
+ return ClickHouseVersion(
+ parts[0],
+ parts[1],
+ parts[2],
+ -1,
+ git,
+ parts.get(3, None),
+ parts.get(4, None)
+ )
def get_version_from_tag(tag: str) -> ClickHouseVersion:
Git.check_tag(tag)
- tag, description = tag[1:].split("-", 1)
- version = get_version_from_string(tag)
- version.with_description(description)
+ tag = tag[1:] # strip initial 'v'
+ if '-' in tag:
+ # Upstream tags with dash
+ tag, description = tag.split("-", 1)
+ version = get_version_from_string(tag)
+ version.with_description(description)
+ else:
+ # Altinity's tags, with dots as separators between parts (handled properly down the road)
+ version = get_version_from_string(tag)
+
return version
@@ -371,7 +405,7 @@ def get_supported_versions(
versions = list(versions)
else:
# checks that repo is not shallow in background
- versions = [v for v in get_tagged_versions() if v.is_supported]
+ versions = get_tagged_versions()
versions.sort()
versions.reverse()
for version in versions:
@@ -433,7 +467,7 @@ def update_contributors(
get_abs_path(relative_contributors_path).write_text(content, encoding="utf-8")
-def update_version_local(version, version_type="testing"):
+def update_version_local(version : ClickHouseVersion, version_type="testing"):
update_contributors()
version.with_description(version_type)
update_cmake_version(version)
@@ -500,6 +534,12 @@ def main():
if args.update_part or args.update_cmake:
update_cmake_version(version)
+ # grype scan needs to know the PR number
+ pr_info = PRInfo()
+ print(f"PR_NUMBER={pr_info.number}")
+ if args.export:
+ print(f"export PR_NUMBER")
+
for k, v in version.as_dict().items():
name = f"CLICKHOUSE_VERSION_{k.upper()}"
print(f"{name}='{v}'")
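To illustrate the parsing change above: building a dict from the enumerated parts lets the same code accept both classic 4-part version strings and Altinity's 5-part flavoured ones (the version strings below are examples):

def split_version(version: str):
    parts = dict(enumerate(version.split(".")))
    return parts[0], parts[1], parts[2], parts.get(3, None), parts.get(4, None)

print(split_version("24.5.1.100"))               # ('24', '5', '1', '100', None)
print(split_version("23.8.8.4.altinitystable"))  # ('23', '8', '8', '4', 'altinitystable')
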
diff --git a/tests/config/config.d/azure_storage_conf.xml b/tests/config/config.d/azure_storage_conf.xml
index 3ddef88fc620..2624656f7e88 100644
--- a/tests/config/config.d/azure_storage_conf.xml
+++ b/tests/config/config.d/azure_storage_conf.xml
@@ -6,8 +6,10 @@
azure
false
33554432
- openbucketforpublicci
-
+
+
+
+
cache
diff --git a/tests/docker_scripts/process_functional_tests_result.py b/tests/docker_scripts/process_functional_tests_result.py
index eb845ce36688..ecda0495bae2 100755
--- a/tests/docker_scripts/process_functional_tests_result.py
+++ b/tests/docker_scripts/process_functional_tests_result.py
@@ -4,6 +4,7 @@
import csv
import logging
import os
+import json
OK_SIGN = "[ OK "
FAIL_SIGN = "[ FAIL "
@@ -20,7 +21,7 @@
RETRIES_SIGN = "Some tests were restarted"
-def process_test_log(log_path, broken_tests):
+def process_test_log(log_path, broken_tests, known_failing_tests):
total = 0
skipped = 0
unknown = 0
@@ -71,7 +72,7 @@ def process_test_log(log_path, broken_tests):
failed += 1
test_results.append((test_name, "Timeout", test_time, []))
elif FAIL_SIGN in line:
- if test_name in broken_tests:
+ if test_name in broken_tests or test_name in known_failing_tests:
success += 1
test_results.append((test_name, "BROKEN", test_time, []))
else:
@@ -135,7 +136,9 @@ def process_test_log(log_path, broken_tests):
)
-def process_result(result_path, broken_tests, in_test_result_file, in_results_file):
+def process_result(
+ result_path, broken_tests, in_test_result_file, in_results_file, known_failing_tests
+):
test_results = []
state = "success"
description = ""
@@ -163,7 +166,7 @@ def process_result(result_path, broken_tests, in_test_result_file, in_results_fi
success_finish,
retries,
test_results,
- ) = process_test_log(result_path, broken_tests)
+ ) = process_test_log(result_path, broken_tests, known_failing_tests)
# Check test_results.tsv for sanitizer asserts, crashes and other critical errors.
# If the file is present, it's expected to be generated by stress_test.lib check for critical errors
@@ -248,6 +251,7 @@ def write_results(results_file, status_file, results, status):
parser.add_argument("--out-results-file", default="/test_output/test_results.tsv")
parser.add_argument("--out-status-file", default="/test_output/check_status.tsv")
parser.add_argument("--broken-tests", default="/repo/tests/analyzer_tech_debt.txt")
+ parser.add_argument("--broken-tests-json", default="/repo/tests/broken_tests.json")
args = parser.parse_args()
broken_tests = []
@@ -257,11 +261,22 @@ def write_results(results_file, status_file, results, status):
broken_tests = f.read().splitlines()
print(f"Broken tests in the list: {len(broken_tests)}")
+ known_failing_tests = list()
+ if os.path.exists(args.broken_tests_json):
+ logging.info(f"File {args.broken_tests_json} with broken tests found")
+
+ with open(args.broken_tests_json) as f:
+ known_failing_tests = list(json.load(f).keys())
+
+ if broken_tests:
+ print(f"Broken tests in the list: {len(broken_tests)}")
+
state, description, test_results = process_result(
args.in_results_dir,
broken_tests,
args.in_test_result_file,
args.in_results_file,
+ known_failing_tests,
)
logging.info("Result parsed")
status = (state, description)
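The new --broken-tests-json file is consumed only for its keys, which must be test names; the values are free-form and currently ignored (the entry below is hypothetical):

import json

example = '{"02483_substring_aggregate": {"reason": "flaky on aarch64"}}'  # hypothetical entry
known_failing_tests = list(json.loads(example).keys())
print(known_failing_tests)  # ['02483_substring_aggregate']
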
diff --git a/tests/docker_scripts/stress_runner.sh b/tests/docker_scripts/stress_runner.sh
index 704b9c75ec77..682cf0041804 100755
--- a/tests/docker_scripts/stress_runner.sh
+++ b/tests/docker_scripts/stress_runner.sh
@@ -57,6 +57,10 @@ configure
config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml
+# NOTE(strtgbb): Trying to avoid errors that may be related to running out of resources
+export CLICKHOUSE_MAX_THREADS=8
+export CLICKHOUSE_MAX_CONCURRENT_QUERIES=4
+
start_server
setup_logs_replication
diff --git a/tests/integration/compose/docker_compose_clickhouse.yml b/tests/integration/compose/docker_compose_clickhouse.yml
index 8b73db02903e..e7f70a5af53b 100644
--- a/tests/integration/compose/docker_compose_clickhouse.yml
+++ b/tests/integration/compose/docker_compose_clickhouse.yml
@@ -1,4 +1,4 @@
# Used to pre-pull images with docker compose
services:
clickhouse1:
- image: clickhouse/integration-test
+ image: altinityinfra/integration-test
diff --git a/tests/integration/compose/docker_compose_dotnet_client.yml b/tests/integration/compose/docker_compose_dotnet_client.yml
index b44a47da5b19..ab1e4c5bf173 100644
--- a/tests/integration/compose/docker_compose_dotnet_client.yml
+++ b/tests/integration/compose/docker_compose_dotnet_client.yml
@@ -1,5 +1,5 @@
services:
dotnet1:
- image: clickhouse/dotnet-client:${DOCKER_DOTNET_CLIENT_TAG:-latest}
+ image: altinityinfra/dotnet-client:${DOCKER_DOTNET_CLIENT_TAG:-latest}
# to keep container running
command: sleep infinity
diff --git a/tests/integration/compose/docker_compose_jdbc_bridge.yml b/tests/integration/compose/docker_compose_jdbc_bridge.yml
index 26f575923a24..3a3ddf637c66 100644
--- a/tests/integration/compose/docker_compose_jdbc_bridge.yml
+++ b/tests/integration/compose/docker_compose_jdbc_bridge.yml
@@ -1,5 +1,6 @@
services:
bridge1:
+ # NOTE(vnemkov): not produced by CI/CD, so must not be replaced with altinityinfra/jdbc-bridge
image: clickhouse/jdbc-bridge
command: |
/bin/bash -c 'cat << EOF > config/datasources/self.json
diff --git a/tests/integration/compose/docker_compose_keeper.yml b/tests/integration/compose/docker_compose_keeper.yml
index fae1b7d0f88c..b20a90ba54db 100644
--- a/tests/integration/compose/docker_compose_keeper.yml
+++ b/tests/integration/compose/docker_compose_keeper.yml
@@ -1,6 +1,6 @@
services:
zoo1:
- image: ${image:-clickhouse/integration-test}
+ image: ${image:-altinityinfra/integration-test}
restart: always
user: ${user:-}
volumes:
@@ -37,7 +37,7 @@ services:
- inet6
- rotate
zoo2:
- image: ${image:-clickhouse/integration-test}
+ image: ${image:-altinityinfra/integration-test}
restart: always
user: ${user:-}
volumes:
@@ -74,7 +74,7 @@ services:
- inet6
- rotate
zoo3:
- image: ${image:-clickhouse/integration-test}
+ image: ${image:-altinityinfra/integration-test}
restart: always
user: ${user:-}
volumes:
diff --git a/tests/integration/compose/docker_compose_kerberized_kafka.yml b/tests/integration/compose/docker_compose_kerberized_kafka.yml
index 90bcf11a50f0..d9a1de8d4036 100644
--- a/tests/integration/compose/docker_compose_kerberized_kafka.yml
+++ b/tests/integration/compose/docker_compose_kerberized_kafka.yml
@@ -50,7 +50,7 @@ services:
net.ipv4.ip_local_port_range: '55000 65535'
kafka_kerberos:
- image: clickhouse/kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest}
+ image: altinityinfra/kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest}
hostname: kafka_kerberos
volumes:
- ${KERBERIZED_KAFKA_DIR}/secrets:/tmp/keytab
diff --git a/tests/integration/compose/docker_compose_kerberos_kdc.yml b/tests/integration/compose/docker_compose_kerberos_kdc.yml
index 8cdac4118b9a..60eb9b039e81 100644
--- a/tests/integration/compose/docker_compose_kerberos_kdc.yml
+++ b/tests/integration/compose/docker_compose_kerberos_kdc.yml
@@ -1,6 +1,6 @@
services:
kerberoskdc:
- image: clickhouse/kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest}
+ image: altinityinfra/kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest}
hostname: kerberoskdc
volumes:
- ${KERBEROS_KDC_DIR}/secrets:/tmp/keytab
diff --git a/tests/integration/compose/docker_compose_minio.yml b/tests/integration/compose/docker_compose_minio.yml
index 2e9cdc8cecd0..39016633f132 100644
--- a/tests/integration/compose/docker_compose_minio.yml
+++ b/tests/integration/compose/docker_compose_minio.yml
@@ -21,14 +21,14 @@ services:
# HTTP proxies for Minio.
proxy1:
- image: clickhouse/s3-proxy
+ image: altinityinfra/s3-proxy
expose:
- "8080" # Redirect proxy port
- "80" # Reverse proxy port
- "443" # Reverse proxy port (secure)
proxy2:
- image: clickhouse/s3-proxy
+ image: altinityinfra/s3-proxy
expose:
- "8080"
- "80"
@@ -36,7 +36,7 @@ services:
# Empty container to run proxy resolver.
resolver:
- image: clickhouse/python-bottle:${DOCKER_PYTHON_BOTTLE_TAG:-latest}
+ image: altinityinfra/python-bottle:${DOCKER_PYTHON_BOTTLE_TAG:-latest}
expose:
- "8080"
tty: true
diff --git a/tests/integration/compose/docker_compose_mysql_golang_client.yml b/tests/integration/compose/docker_compose_mysql_golang_client.yml
index 5268978b0fef..c8f1a9930cb6 100644
--- a/tests/integration/compose/docker_compose_mysql_golang_client.yml
+++ b/tests/integration/compose/docker_compose_mysql_golang_client.yml
@@ -1,5 +1,5 @@
services:
golang1:
- image: clickhouse/mysql-golang-client:${DOCKER_MYSQL_GOLANG_CLIENT_TAG:-latest}
+ image: altinityinfra/mysql-golang-client:${DOCKER_MYSQL_GOLANG_CLIENT_TAG:-latest}
# to keep container running
command: sleep infinity
diff --git a/tests/integration/compose/docker_compose_mysql_java_client.yml b/tests/integration/compose/docker_compose_mysql_java_client.yml
index 20c95a7d51ef..32dbc8301477 100644
--- a/tests/integration/compose/docker_compose_mysql_java_client.yml
+++ b/tests/integration/compose/docker_compose_mysql_java_client.yml
@@ -1,5 +1,5 @@
services:
java1:
- image: clickhouse/mysql-java-client:${DOCKER_MYSQL_JAVA_CLIENT_TAG:-latest}
+ image: altinityinfra/mysql-java-client:${DOCKER_MYSQL_JAVA_CLIENT_TAG:-latest}
# to keep container running
command: sleep 1d
diff --git a/tests/integration/compose/docker_compose_mysql_js_client.yml b/tests/integration/compose/docker_compose_mysql_js_client.yml
index be4edaead4a9..0d0caa9a6d52 100644
--- a/tests/integration/compose/docker_compose_mysql_js_client.yml
+++ b/tests/integration/compose/docker_compose_mysql_js_client.yml
@@ -1,5 +1,5 @@
services:
mysqljs1:
- image: clickhouse/mysql-js-client:${DOCKER_MYSQL_JS_CLIENT_TAG:-latest}
+ image: altinityinfra/mysql-js-client:${DOCKER_MYSQL_JS_CLIENT_TAG:-latest}
# to keep container running
command: sleep infinity
diff --git a/tests/integration/compose/docker_compose_mysql_php_client.yml b/tests/integration/compose/docker_compose_mysql_php_client.yml
index 0b00dedf1526..ff43274641e5 100644
--- a/tests/integration/compose/docker_compose_mysql_php_client.yml
+++ b/tests/integration/compose/docker_compose_mysql_php_client.yml
@@ -1,5 +1,5 @@
services:
php1:
- image: clickhouse/mysql-php-client:${DOCKER_MYSQL_PHP_CLIENT_TAG:-latest}
+ image: altinityinfra/mysql-php-client:${DOCKER_MYSQL_PHP_CLIENT_TAG:-latest}
# to keep container running
command: sleep infinity
diff --git a/tests/integration/compose/docker_compose_nginx.yml b/tests/integration/compose/docker_compose_nginx.yml
index f807b5534108..e6f0da96ce59 100644
--- a/tests/integration/compose/docker_compose_nginx.yml
+++ b/tests/integration/compose/docker_compose_nginx.yml
@@ -4,7 +4,7 @@ services:
# Files will be put into /usr/share/nginx/files.
nginx:
- image: clickhouse/nginx-dav:${DOCKER_NGINX_DAV_TAG:-latest}
+ image: altinityinfra/nginx-dav:${DOCKER_NGINX_DAV_TAG:-latest}
restart: always
ports:
# NOTE: It is enough to change only the host port, since in
diff --git a/tests/integration/compose/docker_compose_postgresql_java_client.yml b/tests/integration/compose/docker_compose_postgresql_java_client.yml
index 133dccd569e4..efaaaa6c22e1 100644
--- a/tests/integration/compose/docker_compose_postgresql_java_client.yml
+++ b/tests/integration/compose/docker_compose_postgresql_java_client.yml
@@ -1,5 +1,5 @@
services:
java:
- image: clickhouse/postgresql-java-client:${DOCKER_POSTGRESQL_JAVA_CLIENT_TAG:-latest}
+ image: altinityinfra/postgresql-java-client:${DOCKER_POSTGRESQL_JAVA_CLIENT_TAG:-latest}
# to keep container running
command: sleep infinity
diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py
index 7c918d175528..337403af0646 100644
--- a/tests/integration/helpers/cluster.py
+++ b/tests/integration/helpers/cluster.py
@@ -85,7 +85,8 @@
# Minimum version we use in integration tests to check compatibility with old releases
# Keep in mind that we only support upgrading between releases that are at most 1 year different.
# This means that this minimum need to be, at least, 1 year older than the current release
-CLICKHOUSE_CI_MIN_TESTED_VERSION = "23.3"
+# NOTE(vnemkov): this is a docker tag, make sure it doesn't include initial 'v'
+CLICKHOUSE_CI_MIN_TESTED_VERSION = "23.3.19.33.altinitystable"
ZOOKEEPER_CONTAINERS = ("zoo1", "zoo2", "zoo3")
@@ -1043,7 +1044,7 @@ def setup_keeper_cmd(self, instance, env_variables, docker_compose_yml_dir):
env_variables["keeper_binary"] = binary_path
env_variables["keeper_cmd_prefix"] = keeper_cmd_prefix
- env_variables["image"] = "clickhouse/integration-test:" + DOCKER_BASE_TAG
+ env_variables["image"] = "altinityinfra/integration-test:" + DOCKER_BASE_TAG
env_variables["user"] = str(os.getuid())
env_variables["keeper_fs"] = "bind"
for i in range(1, 4):
@@ -1595,7 +1596,7 @@ def add_instance(
hostname=None,
env_variables=None,
instance_env_variables=False,
- image="clickhouse/integration-test",
+ image="altinityinfra/integration-test",
tag=None,
# keep the docker container running when clickhouse server is stopped
stay_alive=False,
@@ -3465,7 +3466,7 @@ def __init__(
hostname=None,
env_variables=None,
instance_env_variables=False,
- image="clickhouse/integration-test",
+ image="altinityinfra/integration-test",
tag="latest",
stay_alive=False,
ipv4_address=None,
@@ -4677,7 +4678,7 @@ def write_embedded_config(name, dest_dir, fix_log_level=False):
if (
self.randomize_settings
- and self.image == "clickhouse/integration-test"
+ and self.image == "altinityinfra/integration-test"
and self.tag == DOCKER_BASE_TAG
and self.base_config_dir == DEFAULT_BASE_CONFIG_DIR
):
diff --git a/tests/integration/helpers/network.py b/tests/integration/helpers/network.py
index 153d4c2860cb..6a6ca8bdc46b 100644
--- a/tests/integration/helpers/network.py
+++ b/tests/integration/helpers/network.py
@@ -360,7 +360,7 @@ def __init__(
def _ensure_container(self):
if self._container is None or self._container_expire_time <= time.time():
- image_name = "clickhouse/integration-helper:" + os.getenv(
+ image_name = "altinityinfra/integration-helper:" + os.getenv(
"DOCKER_HELPER_TAG", "latest"
)
for i in range(5):
diff --git a/tests/integration/runner b/tests/integration/runner
index d58e28510dc1..0ab530dd8701 100755
--- a/tests/integration/runner
+++ b/tests/integration/runner
@@ -31,7 +31,7 @@ CONFIG_DIR_IN_REPO = "programs/server"
INTEGRATION_DIR_IN_REPO = "tests/integration"
UTILS_DIR_IN_REPO = "utils"
-DIND_INTEGRATION_TESTS_IMAGE_NAME = "clickhouse/integration-tests-runner"
+DIND_INTEGRATION_TESTS_IMAGE_NAME = "altinityinfra/integration-tests-runner"
def physical_memory() -> int:
diff --git a/tests/integration/test_attach_partition_using_copy/test.py b/tests/integration/test_attach_partition_using_copy/test.py
index 99fb2ff01530..206d339f333c 100644
--- a/tests/integration/test_attach_partition_using_copy/test.py
+++ b/tests/integration/test_attach_partition_using_copy/test.py
@@ -61,7 +61,9 @@ def create_source_table(node, table_name, replicated):
SETTINGS disk = disk(type = web, endpoint = 'https://raw.githubusercontent.com/ClickHouse/web-tables-demo/main/web/')
""".format(
table_name=table_name, engine=engine
- )
+ ),
+ timeout=60,
+ retry_count=3,
)
diff --git a/tests/integration/test_backward_compatibility/test.py b/tests/integration/test_backward_compatibility/test.py
index 0f9a57c7bbca..321dd9eab1ba 100644
--- a/tests/integration/test_backward_compatibility/test.py
+++ b/tests/integration/test_backward_compatibility/test.py
@@ -6,7 +6,7 @@
node1 = cluster.add_instance(
"node1",
with_zookeeper=True,
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
stay_alive=True,
with_installed_binary=True,
diff --git a/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py b/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py
index e4433fac30f2..0a2c787f86b9 100644
--- a/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py
+++ b/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py
@@ -6,7 +6,7 @@
node1 = cluster.add_instance(
"node1",
with_zookeeper=True,
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
with_installed_binary=True,
)
diff --git a/tests/integration/test_backward_compatibility/test_aggregate_function_state.py b/tests/integration/test_backward_compatibility/test_aggregate_function_state.py
index bb18244308b5..183596fe6b5e 100644
--- a/tests/integration/test_backward_compatibility/test_aggregate_function_state.py
+++ b/tests/integration/test_backward_compatibility/test_aggregate_function_state.py
@@ -6,7 +6,7 @@
node1 = cluster.add_instance(
"node1",
with_zookeeper=False,
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
stay_alive=True,
with_installed_binary=True,
@@ -14,7 +14,7 @@
node2 = cluster.add_instance(
"node2",
with_zookeeper=False,
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
stay_alive=True,
with_installed_binary=True,
diff --git a/tests/integration/test_backward_compatibility/test_convert_ordinary.py b/tests/integration/test_backward_compatibility/test_convert_ordinary.py
index 0a29f4433621..37dc3193fff5 100644
--- a/tests/integration/test_backward_compatibility/test_convert_ordinary.py
+++ b/tests/integration/test_backward_compatibility/test_convert_ordinary.py
@@ -5,7 +5,7 @@
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance(
"node",
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
stay_alive=True,
with_zookeeper=True,
diff --git a/tests/integration/test_backward_compatibility/test_cte_distributed.py b/tests/integration/test_backward_compatibility/test_cte_distributed.py
index 3fb0d3cc4289..e482b884acf1 100644
--- a/tests/integration/test_backward_compatibility/test_cte_distributed.py
+++ b/tests/integration/test_backward_compatibility/test_cte_distributed.py
@@ -7,7 +7,7 @@
node2 = cluster.add_instance(
"node2",
with_zookeeper=False,
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
stay_alive=True,
with_installed_binary=True,
diff --git a/tests/integration/test_backward_compatibility/test_functions.py b/tests/integration/test_backward_compatibility/test_functions.py
index 74f3f9b128db..76f4cb5cb87e 100644
--- a/tests/integration/test_backward_compatibility/test_functions.py
+++ b/tests/integration/test_backward_compatibility/test_functions.py
@@ -14,7 +14,7 @@
upstream = cluster.add_instance("upstream", use_old_analyzer=True)
backward = cluster.add_instance(
"backward",
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
with_installed_binary=True,
)
diff --git a/tests/integration/test_backward_compatibility/test_insert_profile_events.py b/tests/integration/test_backward_compatibility/test_insert_profile_events.py
index dd1459a29371..cfa6ffe7c980 100644
--- a/tests/integration/test_backward_compatibility/test_insert_profile_events.py
+++ b/tests/integration/test_backward_compatibility/test_insert_profile_events.py
@@ -10,7 +10,7 @@
upstream_node = cluster.add_instance("upstream_node", use_old_analyzer=True)
old_node = cluster.add_instance(
"old_node",
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
with_installed_binary=True,
)
diff --git a/tests/integration/test_backward_compatibility/test_ip_types_binary_compatibility.py b/tests/integration/test_backward_compatibility/test_ip_types_binary_compatibility.py
index 8631bcc3f093..2d64129f7699 100644
--- a/tests/integration/test_backward_compatibility/test_ip_types_binary_compatibility.py
+++ b/tests/integration/test_backward_compatibility/test_ip_types_binary_compatibility.py
@@ -6,7 +6,7 @@
# Version 21.6.3.14 has incompatible partition id for tables with UUID in partition key.
node = cluster.add_instance(
"node",
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
stay_alive=True,
with_installed_binary=True,
diff --git a/tests/integration/test_backward_compatibility/test_memory_bound_aggregation.py b/tests/integration/test_backward_compatibility/test_memory_bound_aggregation.py
index 7b9f87d269d4..7876644a3538 100644
--- a/tests/integration/test_backward_compatibility/test_memory_bound_aggregation.py
+++ b/tests/integration/test_backward_compatibility/test_memory_bound_aggregation.py
@@ -6,7 +6,7 @@
node1 = cluster.add_instance(
"node1",
with_zookeeper=False,
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
stay_alive=True,
with_installed_binary=True,
@@ -14,7 +14,7 @@
node2 = cluster.add_instance(
"node2",
with_zookeeper=False,
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
stay_alive=True,
with_installed_binary=True,
diff --git a/tests/integration/test_backward_compatibility/test_normalized_count_comparison.py b/tests/integration/test_backward_compatibility/test_normalized_count_comparison.py
index 5255f3c4265e..812e2d3ca280 100644
--- a/tests/integration/test_backward_compatibility/test_normalized_count_comparison.py
+++ b/tests/integration/test_backward_compatibility/test_normalized_count_comparison.py
@@ -7,7 +7,7 @@
node2 = cluster.add_instance(
"node2",
with_zookeeper=False,
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
stay_alive=True,
with_installed_binary=True,
diff --git a/tests/integration/test_backward_compatibility/test_select_aggregate_alias_column.py b/tests/integration/test_backward_compatibility/test_select_aggregate_alias_column.py
index b900d97a7930..b0c463e55825 100644
--- a/tests/integration/test_backward_compatibility/test_select_aggregate_alias_column.py
+++ b/tests/integration/test_backward_compatibility/test_select_aggregate_alias_column.py
@@ -7,7 +7,7 @@
node2 = cluster.add_instance(
"node2",
with_zookeeper=False,
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
stay_alive=True,
with_installed_binary=True,
diff --git a/tests/integration/test_backward_compatibility/test_short_strings_aggregation.py b/tests/integration/test_backward_compatibility/test_short_strings_aggregation.py
index 3a1ffaf7b486..6aa8c01e92f0 100644
--- a/tests/integration/test_backward_compatibility/test_short_strings_aggregation.py
+++ b/tests/integration/test_backward_compatibility/test_short_strings_aggregation.py
@@ -7,7 +7,7 @@
node1 = cluster.add_instance(
"node1",
with_zookeeper=False,
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag="24.1" if is_arm() else CLICKHOUSE_CI_MIN_TESTED_VERSION,
stay_alive=True,
with_installed_binary=True,
@@ -15,7 +15,7 @@
node2 = cluster.add_instance(
"node2",
with_zookeeper=False,
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag="24.1" if is_arm() else CLICKHOUSE_CI_MIN_TESTED_VERSION,
stay_alive=True,
with_installed_binary=True,
diff --git a/tests/integration/test_backward_compatibility/test_vertical_merges_from_compact_parts.py b/tests/integration/test_backward_compatibility/test_vertical_merges_from_compact_parts.py
index 599f5bf65575..633c521cd205 100644
--- a/tests/integration/test_backward_compatibility/test_vertical_merges_from_compact_parts.py
+++ b/tests/integration/test_backward_compatibility/test_vertical_merges_from_compact_parts.py
@@ -6,7 +6,7 @@
node_old = cluster.add_instance(
"node1",
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
stay_alive=True,
with_installed_binary=True,
diff --git a/tests/integration/test_cow_policy/test.py b/tests/integration/test_cow_policy/test.py
index 0d14eacbcb2b..db34c157f0ab 100644
--- a/tests/integration/test_cow_policy/test.py
+++ b/tests/integration/test_cow_policy/test.py
@@ -43,7 +43,9 @@ def test_cow_policy(start_cluster, storage_policy):
ENGINE = MergeTree
ORDER BY (postcode1, postcode2, addr1, addr2)
SETTINGS storage_policy = '{storage_policy}'
- """
+ """,
+ timeout=60,
+ retry_count=3,
)
prev_count = int(node.query("SELECT count() FROM uk_price_paid"))
assert prev_count > 0
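The copy-on-write policy test additionally gains explicit robustness parameters on the table-creation query. A hedged sketch of the call shape, assuming the integration-test helper's query() accepts the keyword arguments exactly as the hunk above uses them:

# Sketch mirroring the hunk above; assumes node.query() supports timeout and retry_count.
node.query(
    "SELECT count() FROM uk_price_paid",
    timeout=60,     # give up if the query runs longer than 60 seconds
    retry_count=3,  # retry a few times on transient failures
)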
diff --git a/tests/integration/test_disk_over_web_server/test.py b/tests/integration/test_disk_over_web_server/test.py
index 2fee0ee8cc78..d0ef55de289c 100644
--- a/tests/integration/test_disk_over_web_server/test.py
+++ b/tests/integration/test_disk_over_web_server/test.py
@@ -37,7 +37,7 @@ def cluster():
with_nginx=True,
stay_alive=True,
with_installed_binary=True,
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
)
cluster.add_instance(
diff --git a/tests/integration/test_distributed_insert_backward_compatibility/test.py b/tests/integration/test_distributed_insert_backward_compatibility/test.py
index f02d6fb13b91..9433edd71574 100644
--- a/tests/integration/test_distributed_insert_backward_compatibility/test.py
+++ b/tests/integration/test_distributed_insert_backward_compatibility/test.py
@@ -10,7 +10,7 @@
node_dist = cluster.add_instance(
"node2",
main_configs=["configs/remote_servers.xml"],
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
stay_alive=True,
with_installed_binary=True,
diff --git a/tests/integration/test_old_versions/test.py b/tests/integration/test_old_versions/test.py
index e1eb9257280b..0507642118ec 100644
--- a/tests/integration/test_old_versions/test.py
+++ b/tests/integration/test_old_versions/test.py
@@ -6,7 +6,7 @@
cluster = ClickHouseCluster(__file__)
node_oldest = cluster.add_instance(
"node_oldest",
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
with_installed_binary=True,
main_configs=["configs/config.d/test_cluster.xml"],
diff --git a/tests/integration/test_polymorphic_parts/test.py b/tests/integration/test_polymorphic_parts/test.py
index 49acf287481a..20dc47043145 100644
--- a/tests/integration/test_polymorphic_parts/test.py
+++ b/tests/integration/test_polymorphic_parts/test.py
@@ -365,7 +365,7 @@ def test_different_part_types_on_replicas(start_cluster, table, part_type):
"node7",
user_configs=["configs_old/users.d/not_optimize_count.xml"],
with_zookeeper=True,
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
stay_alive=True,
with_installed_binary=True,
diff --git a/tests/integration/test_replicated_merge_tree_compatibility/test.py b/tests/integration/test_replicated_merge_tree_compatibility/test.py
index 3c1599aeb904..59ee325df237 100644
--- a/tests/integration/test_replicated_merge_tree_compatibility/test.py
+++ b/tests/integration/test_replicated_merge_tree_compatibility/test.py
@@ -6,7 +6,7 @@
node1 = cluster.add_instance(
"node1",
with_zookeeper=True,
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
stay_alive=True,
with_installed_binary=True,
@@ -14,7 +14,7 @@
node2 = cluster.add_instance(
"node2",
with_zookeeper=True,
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
stay_alive=True,
with_installed_binary=True,
diff --git a/tests/integration/test_replicating_constants/test.py b/tests/integration/test_replicating_constants/test.py
index af8916dd625b..8da83038d146 100644
--- a/tests/integration/test_replicating_constants/test.py
+++ b/tests/integration/test_replicating_constants/test.py
@@ -8,8 +8,8 @@
node2 = cluster.add_instance(
"node2",
with_zookeeper=True,
- image="clickhouse/clickhouse-server",
- tag="23.3",
+ image="altinity/clickhouse-server",
+ tag="23.3.19.33.altinitystable",
with_installed_binary=True,
)
diff --git a/tests/integration/test_system_ddl_worker_queue/configs/remote_servers.xml b/tests/integration/test_system_ddl_worker_queue/configs/remote_servers.xml
index 791af83a2d6d..f6392caf5e51 100644
--- a/tests/integration/test_system_ddl_worker_queue/configs/remote_servers.xml
+++ b/tests/integration/test_system_ddl_worker_queue/configs/remote_servers.xml
@@ -25,4 +25,5 @@
+ 1
diff --git a/tests/integration/test_system_ddl_worker_queue/test.py b/tests/integration/test_system_ddl_worker_queue/test.py
index 4659e5b92e84..1bebf709a821 100644
--- a/tests/integration/test_system_ddl_worker_queue/test.py
+++ b/tests/integration/test_system_ddl_worker_queue/test.py
@@ -1,4 +1,5 @@
import pytest
+import time
from helpers.cluster import ClickHouseCluster
@@ -25,46 +26,131 @@ def started_cluster():
try:
cluster.start()
- for i, node in enumerate([node1, node2]):
- node.query("CREATE DATABASE testdb")
- node.query(
- """CREATE TABLE testdb.test_table(id UInt32, val String) ENGINE = ReplicatedMergeTree('/clickhouse/test/test_table1', '{}') ORDER BY id;""".format(
- i
- )
- )
- for i, node in enumerate([node3, node4]):
- node.query("CREATE DATABASE testdb")
- node.query(
- """CREATE TABLE testdb.test_table(id UInt32, val String) ENGINE = ReplicatedMergeTree('/clickhouse/test/test_table2', '{}') ORDER BY id;""".format(
- i
- )
- )
yield cluster
finally:
cluster.shutdown()
+def maintain_test_table(test_table):
+ tmark = time.time() # to guarantee ZK path uniqueness
+
+ for i, node in enumerate([node1, node2]):
+ node.query(f"DROP TABLE IF EXISTS testdb.{test_table} SYNC")
+ node.query("DROP DATABASE IF EXISTS testdb")
+
+ node.query("CREATE DATABASE testdb")
+ node.query(
+ f"CREATE TABLE testdb.{test_table}(id UInt32, val String) ENGINE = ReplicatedMergeTree('/clickhouse/test/{test_table}1-{tmark}', '{i}') ORDER BY id;"
+ )
+ for i, node in enumerate([node3, node4]):
+ node.query(f"DROP TABLE IF EXISTS testdb.{test_table} SYNC")
+ node.query("DROP DATABASE IF EXISTS testdb")
+
+ node.query("CREATE DATABASE testdb")
+ node.query(
+ f"CREATE TABLE testdb.{test_table}(id UInt32, val String) ENGINE = ReplicatedMergeTree('/clickhouse/test/{test_table}2-{tmark}', '{i}') ORDER BY id;"
+ )
+
+
def test_distributed_ddl_queue(started_cluster):
+ test_table = "test_table"
+ maintain_test_table(test_table)
node1.query(
- "INSERT INTO testdb.test_table SELECT number, toString(number) FROM numbers(100)"
+ f"INSERT INTO testdb.{test_table} SELECT number, toString(number) FROM numbers(100)"
)
node3.query(
- "INSERT INTO testdb.test_table SELECT number, toString(number) FROM numbers(100)"
+ f"INSERT INTO testdb.{test_table} SELECT number, toString(number) FROM numbers(100)"
)
- node2.query("SYSTEM SYNC REPLICA testdb.test_table")
- node4.query("SYSTEM SYNC REPLICA testdb.test_table")
+ node2.query(f"SYSTEM SYNC REPLICA testdb.{test_table}")
+ node4.query(f"SYSTEM SYNC REPLICA testdb.{test_table}")
node1.query(
- "ALTER TABLE testdb.test_table ON CLUSTER test_cluster ADD COLUMN somecolumn UInt8 AFTER val",
+ f"ALTER TABLE testdb.{test_table} ON CLUSTER test_cluster ADD COLUMN somecolumn UInt8 AFTER val",
settings={"replication_alter_partitions_sync": "2"},
)
for node in nodes:
- node.query("SYSTEM SYNC REPLICA testdb.test_table")
- assert node.query("SELECT somecolumn FROM testdb.test_table LIMIT 1") == "0\n"
+ node.query(f"SYSTEM SYNC REPLICA testdb.{test_table}")
+ assert (
+ node.query(f"SELECT somecolumn FROM testdb.{test_table} LIMIT 1") == "0\n"
+ )
assert (
node.query(
"SELECT If((SELECT count(*) FROM system.distributed_ddl_queue WHERE cluster='test_cluster' AND entry='query-0000000000') > 0, 'ok', 'fail')"
)
== "ok\n"
)
+
+ node1.query(
+ f"ALTER TABLE testdb.{test_table} ON CLUSTER test_cluster DROP COLUMN somecolumn",
+ settings={"replication_alter_partitions_sync": "2"},
+ )
+
+
+def test_distributed_ddl_rubbish(started_cluster):
+ test_table = "test_table_rubbish"
+ maintain_test_table(test_table)
+ node1.query(
+ f"ALTER TABLE testdb.{test_table} ON CLUSTER test_cluster ADD COLUMN somenewcolumn UInt8 AFTER val",
+ settings={"replication_alter_partitions_sync": "2"},
+ )
+
+ zk_content = node1.query(
+ "SELECT name, value, path FROM system.zookeeper WHERE path LIKE '/clickhouse/task_queue/ddl%' SETTINGS allow_unrestricted_reads_from_keeper=true",
+ parse=True,
+ ).to_dict("records")
+
+ original_query = ""
+ new_query = "query-artificial-" + str(time.monotonic_ns())
+
+ # Copy information about query (one that added 'somenewcolumn') with new query ID
+ # and broken query text (TABLE => TUBLE)
+ for row in zk_content:
+ if row["value"].find("somenewcolumn") >= 0:
+ original_query = row["name"]
+ break
+
+ rows_to_insert = []
+
+ for row in zk_content:
+ if row["name"] == original_query:
+ rows_to_insert.append(
+ {
+ "name": new_query,
+ "path": row["path"],
+ "value": row["value"].replace("TABLE", "TUBLE"),
+ }
+ )
+ continue
+ pos = row["path"].find(original_query)
+ if pos >= 0:
+ rows_to_insert.append(
+ {
+ "name": row["name"],
+ "path": row["path"].replace(original_query, new_query),
+ "value": row["value"],
+ }
+ )
+
+ # Ingest it to ZK
+ for row in rows_to_insert:
+ node1.query(
+ "insert into system.zookeeper (name, path, value) values ('{}', '{}', '{}')".format(
+ f'{row["name"]}', f'{row["path"]}', f'{row["value"]}'
+ )
+ )
+
+ # Ensure that data is visible via system.distributed_ddl_queue
+ assert (
+ int(
+ node1.query(
+ f"SELECT count(1) FROM system.distributed_ddl_queue WHERE entry='{new_query}' AND cluster=''"
+ )
+ )
+ == 4
+ )
+
+ node1.query(
+ f"ALTER TABLE testdb.{test_table} ON CLUSTER test_cluster DROP COLUMN somenewcolumn",
+ settings={"replication_alter_partitions_sync": "2"},
+ )
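Two ideas drive this rewrite. First, table setup moves out of the fixture into maintain_test_table(), which drops and recreates the database per test and appends a wall-clock marker to the ReplicatedMergeTree ZooKeeper path so repeated runs never collide with leftover metadata. Second, the new test_distributed_ddl_rubbish copies an existing DDL-queue entry back into system.zookeeper under a fresh artificial query id with the statement deliberately corrupted (TABLE -> TUBLE), then asserts that system.distributed_ddl_queue still exposes the entry for all four hosts instead of failing on the malformed record. A small sketch of the path-uniqueness trick (function name assumed for illustration):

import time

def replicated_zk_path(table: str, shard: int) -> str:
    # A wall-clock marker keeps re-runs from hitting a stale /clickhouse/test/... node.
    tmark = time.time()
    return f"/clickhouse/test/{table}{shard}-{tmark}"

# Used as: ReplicatedMergeTree(replicated_zk_path("test_table", 1), "0")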
diff --git a/tests/integration/test_trace_log_build_id/test.py b/tests/integration/test_trace_log_build_id/test.py
index e74aa7d49f91..ae63f1e4a422 100644
--- a/tests/integration/test_trace_log_build_id/test.py
+++ b/tests/integration/test_trace_log_build_id/test.py
@@ -12,7 +12,7 @@
node = cluster.add_instance(
"node",
with_zookeeper=True,
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
stay_alive=True,
with_installed_binary=True,
diff --git a/tests/integration/test_ttl_replicated/test.py b/tests/integration/test_ttl_replicated/test.py
index 939453178fcc..cbf6cd96dcbb 100644
--- a/tests/integration/test_ttl_replicated/test.py
+++ b/tests/integration/test_ttl_replicated/test.py
@@ -18,7 +18,7 @@
node4 = cluster.add_instance(
"node4",
with_zookeeper=True,
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
stay_alive=True,
with_installed_binary=True,
@@ -30,7 +30,7 @@
node5 = cluster.add_instance(
"node5",
with_zookeeper=True,
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
stay_alive=True,
with_installed_binary=True,
@@ -41,7 +41,7 @@
node6 = cluster.add_instance(
"node6",
with_zookeeper=True,
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
stay_alive=True,
with_installed_binary=True,
diff --git a/tests/integration/test_version_update/test.py b/tests/integration/test_version_update/test.py
index fd1bfb2ba849..1f33b9e69a37 100644
--- a/tests/integration/test_version_update/test.py
+++ b/tests/integration/test_version_update/test.py
@@ -10,7 +10,7 @@
node2 = cluster.add_instance(
"node2",
with_zookeeper=True,
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
with_installed_binary=True,
stay_alive=True,
diff --git a/tests/integration/test_version_update_after_mutation/test.py b/tests/integration/test_version_update_after_mutation/test.py
index 9aea0c0844be..e750c82c2f1c 100644
--- a/tests/integration/test_version_update_after_mutation/test.py
+++ b/tests/integration/test_version_update_after_mutation/test.py
@@ -10,7 +10,7 @@
node1 = cluster.add_instance(
"node1",
with_zookeeper=True,
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
with_installed_binary=True,
stay_alive=True,
@@ -21,7 +21,7 @@
node2 = cluster.add_instance(
"node2",
with_zookeeper=True,
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
with_installed_binary=True,
stay_alive=True,
@@ -32,7 +32,7 @@
node3 = cluster.add_instance(
"node3",
with_zookeeper=True,
- image="clickhouse/clickhouse-server",
+ image="altinity/clickhouse-server",
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,
with_installed_binary=True,
stay_alive=True,
From e9ee9ef45c223e604714c24aa8c49ce137548aea Mon Sep 17 00:00:00 2001
From: strtgbb <146047128+strtgbb@users.noreply.github.com>
Date: Thu, 22 May 2025 08:52:02 -0400
Subject: [PATCH 2/3] Update report: use SHA instead of URL to get results from
 the DB
---
.../create_workflow_report.py | 41 ++++++++++---------
1 file changed, 22 insertions(+), 19 deletions(-)
diff --git a/.github/actions/create_workflow_report/create_workflow_report.py b/.github/actions/create_workflow_report/create_workflow_report.py
index 580e27ed9ec5..919571e1a9ee 100755
--- a/.github/actions/create_workflow_report/create_workflow_report.py
+++ b/.github/actions/create_workflow_report/create_workflow_report.py
@@ -148,9 +148,9 @@ def get_run_details(run_url: str) -> dict:
return response.json()
-def get_checks_fails(client: Client, job_url: str):
+def get_checks_fails(client: Client, commit_sha: str, branch_name: str):
"""
- Get tests that did not succeed for the given job URL.
+ Get tests that did not succeed for the given commit and branch.
Exclude checks that have status 'error' as they are counted in get_checks_errors.
"""
query = f"""SELECT job_status, job_name, status as test_status, test_name, results_link
@@ -163,19 +163,21 @@ def get_checks_fails(client: Client, job_url: str):
report_url as results_link,
task_url
FROM `gh-data`.checks
+ WHERE commit_sha='{commit_sha}' AND head_ref='{branch_name}'
GROUP BY check_name, test_name, report_url, task_url
)
- WHERE task_url LIKE '{job_url}%'
- AND test_status IN ('FAIL', 'ERROR')
+ WHERE test_status IN ('FAIL', 'ERROR')
AND job_status!='error'
ORDER BY job_name, test_name
"""
return client.query_dataframe(query)
-def get_checks_known_fails(client: Client, job_url: str, known_fails: dict):
+def get_checks_known_fails(
+ client: Client, commit_sha: str, branch_name: str, known_fails: dict
+):
"""
- Get tests that are known to fail for the given job URL.
+ Get tests that are known to fail for the given commit and branch.
"""
if len(known_fails) == 0:
return pd.DataFrame()
@@ -190,10 +192,10 @@ def get_checks_known_fails(client: Client, job_url: str, known_fails: dict):
report_url as results_link,
task_url
FROM `gh-data`.checks
+ WHERE commit_sha='{commit_sha}' AND head_ref='{branch_name}'
GROUP BY check_name, test_name, report_url, task_url
)
- WHERE task_url LIKE '{job_url}%'
- AND test_status='BROKEN'
+ WHERE test_status='BROKEN'
AND test_name IN ({','.join(f"'{test}'" for test in known_fails.keys())})
ORDER BY job_name, test_name
"""
@@ -213,9 +215,9 @@ def get_checks_known_fails(client: Client, job_url: str, known_fails: dict):
return df
-def get_checks_errors(client: Client, job_url: str):
+def get_checks_errors(client: Client, commit_sha: str, branch_name: str):
"""
- Get checks that have status 'error' for the given job URL.
+ Get checks that have status 'error' for the given commit and branch.
"""
query = f"""SELECT job_status, job_name, status as test_status, test_name, results_link
FROM (
@@ -227,10 +229,10 @@ def get_checks_errors(client: Client, job_url: str):
report_url as results_link,
task_url
FROM `gh-data`.checks
+ WHERE commit_sha='{commit_sha}' AND head_ref='{branch_name}'
GROUP BY check_name, test_name, report_url, task_url
)
- WHERE task_url LIKE '{job_url}%'
- AND job_status=='error'
+ WHERE job_status=='error'
ORDER BY job_name, test_name
"""
return client.query_dataframe(query)
@@ -431,7 +433,7 @@ def get_cves(pr_number, commit_sha):
def url_to_html_link(url: str) -> str:
if not url:
return ""
- text = url.split("/")[-1].replace("__", "_")
+ text = url.split("/")[-1].split("?")[0]
if not text:
text = "results"
return f'<a href="{url}">{text}</a>'
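The link text is now the last path segment of the URL with any query string stripped, replacing the old double-underscore rewriting. For a hypothetical results URL the two steps give:

# Hypothetical URL, shown only to trace the new string handling.
url = "https://example.com/reports/stateless_tests__release.html?X-Amz-Signature=abc"
text = url.split("/")[-1]   # 'stateless_tests__release.html?X-Amz-Signature=abc'
text = text.split("?")[0]   # 'stateless_tests__release.html'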
@@ -447,7 +449,7 @@ def format_test_status(text: str) -> str:
color = (
"red"
if text.lower().startswith("fail")
- else "orange" if text.lower() in ("error", "broken") else "green"
+ else "orange" if text.lower() in ("error", "broken", "pending") else "green"
)
return f'<span style="color: {color}">{text}</span>'
@@ -523,12 +525,15 @@ def main():
settings={"use_numpy": True},
)
+ run_details = get_run_details(args.actions_run_url)
+ branch_name = run_details.get("head_branch", "unknown branch")
+
fail_results = {
"job_statuses": get_commit_statuses(args.commit_sha),
- "checks_fails": get_checks_fails(db_client, args.actions_run_url),
+ "checks_fails": get_checks_fails(db_client, args.commit_sha, branch_name),
"checks_known_fails": [],
"pr_new_fails": [],
- "checks_errors": get_checks_errors(db_client, args.actions_run_url),
+ "checks_errors": get_checks_errors(db_client, args.commit_sha, branch_name),
"regression_fails": get_regression_fails(db_client, args.actions_run_url),
"docker_images_cves": (
[] if not args.cves else get_cves(args.pr_number, args.commit_sha)
@@ -549,12 +554,10 @@ def main():
if known_fails:
fail_results["checks_known_fails"] = get_checks_known_fails(
- db_client, args.actions_run_url, known_fails
+ db_client, args.commit_sha, branch_name, known_fails
)
if args.pr_number == 0:
- run_details = get_run_details(args.actions_run_url)
- branch_name = run_details.get("head_branch", "unknown branch")
pr_info_html = f"Release ({branch_name})"
else:
try:
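The net effect of this patch: instead of matching rows whose task_url starts with the Actions run URL, each checks query filters the inner select on the commit SHA plus the run's head branch, which is now fetched once via get_run_details() early in main() and threaded through get_checks_fails, get_checks_errors, and get_checks_known_fails. A rough sketch of the shared predicate (the helper name is illustrative; the column names follow the hunks above):

# Sketch only; checks_scope() is not in the script, it just names the shared filter.
def checks_scope(commit_sha: str, branch_name: str) -> str:
    return f"commit_sha='{commit_sha}' AND head_ref='{branch_name}'"

# Old predicate (outer query):  WHERE task_url LIKE '{job_url}%'
# New predicate (inner query):  WHERE {checks_scope(args.commit_sha, branch_name)}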
From dea2209be097fb791726511467422a9f2cdaf90c Mon Sep 17 00:00:00 2001
From: strtgbb <146047128+strtgbb@users.noreply.github.com>
Date: Wed, 16 Jul 2025 09:08:51 -0400
Subject: [PATCH 3/3] Can't rely on head_ref being set correctly in the DB
---
.../create_workflow_report/create_workflow_report.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/.github/actions/create_workflow_report/create_workflow_report.py b/.github/actions/create_workflow_report/create_workflow_report.py
index 919571e1a9ee..741272685678 100755
--- a/.github/actions/create_workflow_report/create_workflow_report.py
+++ b/.github/actions/create_workflow_report/create_workflow_report.py
@@ -163,7 +163,7 @@ def get_checks_fails(client: Client, commit_sha: str, branch_name: str):
report_url as results_link,
task_url
FROM `gh-data`.checks
- WHERE commit_sha='{commit_sha}' AND head_ref='{branch_name}'
+ WHERE commit_sha='{commit_sha}'
GROUP BY check_name, test_name, report_url, task_url
)
WHERE test_status IN ('FAIL', 'ERROR')
@@ -192,7 +192,7 @@ def get_checks_known_fails(
report_url as results_link,
task_url
FROM `gh-data`.checks
- WHERE commit_sha='{commit_sha}' AND head_ref='{branch_name}'
+ WHERE commit_sha='{commit_sha}'
GROUP BY check_name, test_name, report_url, task_url
)
WHERE test_status='BROKEN'
@@ -229,7 +229,7 @@ def get_checks_errors(client: Client, commit_sha: str, branch_name: str):
report_url as results_link,
task_url
FROM `gh-data`.checks
- WHERE commit_sha='{commit_sha}' AND head_ref='{branch_name}'
+ WHERE commit_sha='{commit_sha}'
GROUP BY check_name, test_name, report_url, task_url
)
WHERE job_status=='error'
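The last patch narrows the filter once more: head_ref is not populated reliably in `gh-data`.checks, so only the commit SHA is trusted when selecting rows, while the branch name is kept solely for labelling the report. Continuing the sketch from the note above, the predicate after this change reduces to:

def checks_scope(commit_sha: str, branch_name: str = "") -> str:
    # head_ref is deliberately ignored: it cannot be relied upon in the checks table.
    return f"commit_sha='{commit_sha}'"

The three query helpers keep their (client, commit_sha, branch_name) signatures, so their callers in main() are unchanged.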