diff --git a/.github/actions/test/action.yml b/.github/actions/test/action.yml index 0ca2a087..4045190e 100644 --- a/.github/actions/test/action.yml +++ b/.github/actions/test/action.yml @@ -1,4 +1,5 @@ name: 'Test' + description: 'A GitHub Action that tests this action' inputs: @@ -21,12 +22,12 @@ runs: - name: Setup Python if: inputs.python-version != 'installed' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ inputs.python-version }} - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Detect OS id: os @@ -48,8 +49,9 @@ runs: shell: bash - name: Cache PIP Packages - uses: actions/cache@v3 + uses: actions/cache@v4 id: cache + if: github.event_name != 'schedule' with: path: ${{ steps.os.outputs.pip-cache }} key: ${{ inputs.os }}-pip-test-${{ inputs.python-version }}-${{ hashFiles('**/requirements.txt', '**/constraints.txt') }}-${{ steps.os.outputs.date }} @@ -60,16 +62,37 @@ runs: - name: Install Python dependencies run: | - python3 -V - python3 -m pip freeze | sort - python3 -m pip cache info || true - python3 -m pip cache list || true - python3 -m pip install --upgrade --force pip wheel - python3 -m pip install --force -r python/requirements.txt - python3 -m pip install --force -r python/test/requirements.txt -c python/test/constraints.txt - python3 -m pip freeze | sort - python3 -m pip cache info || true - python3 -m pip cache list || true + python -m venv "$RUNNER_TEMP/venv" + echo "$RUNNER_TEMP/venv/bin" >> "$GITHUB_PATH" + echo "$RUNNER_TEMP/venv/Scripts" >> "$GITHUB_PATH" + export PATH="$RUNNER_TEMP/venv/bin:$RUNNER_TEMP/venv/Scripts:$PATH" + which python + + # inspect pip cache + python -m pip freeze | sort + python -m pip cache info || true + python -m pip cache list || true + + # remove cached built whl files + rm -rf "$(python -m pip cache info | grep ".*[Ww]heels location: " | cut -d ":" -f 2- | cut -d " " -f 2-)" + python -m pip cache list || true + + # install dependencies + 
python_minor_version="$(python -c 'import sys; print(f"{sys.version_info.major}.{sys.version_info.minor}")')" + python -m pip install --force -r python/requirements-$python_minor_version.txt + python -m pip install --force -r python/test/requirements.txt -c python/test/constraints.txt + + # inspect pip cache + python -m pip freeze | sort + python -m pip cache info || true + python -m pip cache list || true + + # assert no whl files have been built + if python -m pip cache info && [[ "$(python -m pip cache info | grep "Number of .*wheels:")" != "Number of "*"wheels: 0" ]] + then + echo "Dependency whl files have been built" + exit 1 + fi shell: bash - name: Update expectation files @@ -84,16 +107,16 @@ runs: # we only upload the changed files if we can find zip if which zip then - (git diff --name-only && git ls-files -o --exclude-standard) | xargs -d "\n" zip changed-expectations.zip + (git diff --name-only && git ls-files -o --exclude-standard) | xargs zip changed-expectations.zip exit 1 fi fi shell: bash - name: Upload changed expectation files if: steps.changes.outcome == 'failure' - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: Changed expectations + name: Changed expectations (python-${{ inputs.python-version }}, ${{ inputs.os }}) path: changed-expectations.zip if-no-files-found: error @@ -102,7 +125,7 @@ runs: PYTHONPATH: .. run: | cd python/test - python3 -m pytest --capture=tee-sys --continue-on-collection-errors --junit-xml ../../test-results/pytest.xml + python -m pytest --capture=tee-sys --continue-on-collection-errors --junit-xml ../../test-results/pytest.xml shell: bash - name: PyTest (EST) @@ -112,7 +135,7 @@ runs: PYTHONPATH: .. 
run: | cd python/test - python3 -m pytest --capture=tee-sys --continue-on-collection-errors --junit-xml ../../test-results/pytest-est.xml + python -m pytest --capture=tee-sys --continue-on-collection-errors --junit-xml ../../test-results/pytest-est.xml shell: bash - name: PyTest (CET) @@ -122,12 +145,12 @@ runs: PYTHONPATH: .. run: | cd python/test - python3 -m pytest --capture=tee-sys --continue-on-collection-errors --junit-xml ../../test-results/pytest-cet.xml + python -m pytest --capture=tee-sys --continue-on-collection-errors --junit-xml ../../test-results/pytest-cet.xml shell: bash - name: Upload Test Results if: always() - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Test Results (python-${{ inputs.python-version }}, ${{ inputs.os }}) path: | diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 35bd16d2..8ac6b8c4 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -4,7 +4,3 @@ updates: directory: "/" schedule: interval: "monthly" - - package-ecosystem: "pip" - directory: "/" - schedule: - interval: "weekly" diff --git a/.github/upgrade-pip-packages.sh b/.github/upgrade-pip-packages.sh index bfcfddf4..e9c2f0d2 100755 --- a/.github/upgrade-pip-packages.sh +++ b/.github/upgrade-pip-packages.sh @@ -2,12 +2,13 @@ set -euo pipefail base="$(dirname "$0")" +python_minor_version="$(python3 -c 'import sys; print(f"{sys.version_info.major}.{sys.version_info.minor}")')" -pip install --upgrade --force pip==22.0.0 -pip install --upgrade --upgrade-strategy eager -r "$base/../python/requirements-direct.txt" +pip install --upgrade --force pip==24.0.0 +pip install --upgrade --upgrade-strategy eager -r "$base/../python/requirements.txt" pip install pipdeptree -pipdeptree --packages="$(sed -e "s/;.*//" -e "s/=.*//g" "$base/../python/requirements-direct.txt" | paste -s -d ,)" --freeze > "$base/../python/requirements.txt" +pipdeptree --packages="$(sed -e "s/;.*//" -e "s/=.*//g" "$base/../python/requirements.txt" | 
paste -s -d ,)" --freeze > "$base/../python/requirements-$python_minor_version.txt" -git diff "$base/../python/requirements.txt" +git diff "$base/../python/requirements-$python_minor_version.txt" diff --git a/.github/workflows/ci-cd.yml b/.github/workflows/ci-cd.yml index d056ecd4..ff4204d8 100644 --- a/.github/workflows/ci-cd.yml +++ b/.github/workflows/ci-cd.yml @@ -19,34 +19,54 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 - - name: Check requirements.txt against requirements-direct.txt + uses: actions/checkout@v4 + - name: Check requirements.txt run: | - (diff -w python/requirements-direct.txt python/requirements.txt || true) | (! grep -e "^<") + (diff -w <(grep -v "python_version > '3.7'" python/requirements.txt | sed -e "s/;.*//") python/requirements-3.7.txt || true) | (! grep -e "^<") + (diff -w <(grep -v "python_version <= '3.7'" python/requirements.txt | sed -e "s/;.*//") python/requirements-post-3.7.txt || true) | (! grep -e "^<") shell: bash - name: Check for dependency updates continue-on-error: true - run: - .github/upgrade-pip-packages.sh + run: | + pip install tox + tox + git diff --exit-code shell: bash test-mac: name: "Test macOS" uses: "./.github/workflows/test-os.yml" with: - os: '["macos-12", "macos-13"]' + os: '["macos-12", "macos-13", "macos-14"]' + python-version: '["3.10", "3.11", "3.12", "installed"]' + include: > + [ + {"os": "macos-12", "python-version": "3.8"}, + {"os": "macos-12", "python-version": "3.9"}, + {"os": "macos-13", "python-version": "3.8"}, + {"os": "macos-13", "python-version": "3.9"}, + ] test-lnx: name: "Test Ubuntu" uses: "./.github/workflows/test-os.yml" with: - os: '["ubuntu-20.04", "ubuntu-22.04"]' + os: '["ubuntu-20.04", "ubuntu-22.04", "ubuntu-24.04"]' + python-version: '["3.9", "3.10", "3.11", "3.12", "installed"]' + include: > + [ + {"os": "ubuntu-20.04", "python-version": "3.7"}, + {"os": "ubuntu-20.04", "python-version": "3.8"}, + {"os": "ubuntu-22.04", "python-version": 
"3.8"}, + ] test-win: name: "Test Windows" uses: "./.github/workflows/test-os.yml" with: os: '["windows-2019", "windows-2022"]' + python-version: '["3.8", "3.9", "3.10", "3.11", "3.12", "installed"]' + include: '[{"os": "windows-2019", "python-version": "3.7"}]' publish: name: "Publish" @@ -75,10 +95,10 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Extract action image and version - # we deploy from a specific commit on main (the one that mentions a new version the first time) + # we deploy from a specific commit on master (the one that mentions a new version the first time) # so we need to tell docker/metadata-action to extract docker tags from that version id: action run: | @@ -93,12 +113,10 @@ jobs: env: DOCKER_CLI_EXPERIMENTAL: enabled run: | - exists='false' if docker manifest inspect '${{ steps.action.outputs.image }}' then - exists='true' + echo "exists=true" >>$GITHUB_OUTPUT fi - echo "exists=$exists" >>$GITHUB_OUTPUT shell: bash deploy: @@ -117,7 +135,7 @@ jobs: steps: - name: Docker meta id: docker-meta - uses: docker/metadata-action@v4 + uses: docker/metadata-action@v5 with: images: ghcr.io/step-security/publish-unit-test-result-action flavor: | @@ -131,13 +149,13 @@ jobs: type=semver,pattern={{version}},value=${{ needs.config-deploy.outputs.image-version }} - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Login to GitHub Container Registry - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.actor }} @@ -156,7 +174,7 @@ jobs: - name: Build and push Docker image - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v6 id: build with: tags: ${{ steps.docker-meta.outputs.tags }} @@ -184,20 +202,20 @@ jobs: id-token: write # for creating OIDC tokens for signing. 
packages: write # for uploading attestations. if: ${{ needs.deploy.outputs.should_push == 'true' }} - uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.9.0 + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.0.0 with: image: ${{ needs.deploy.outputs.image }} digest: ${{ needs.deploy.outputs.digest }} registry-username: ${{ github.actor }} secrets: registry-password: ${{ secrets.GITHUB_TOKEN }} - + event_file: name: "Event File" runs-on: ubuntu-latest steps: - name: Upload - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Event File path: ${{ github.event_path }} diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 920bda02..c1186f5d 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -31,11 +31,11 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v2 + uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -46,7 +46,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@v2 + uses: github/codeql-action/autobuild@v3 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 https://git.io/JvXDl @@ -60,4 +60,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + uses: github/codeql-action/analyze@v3 diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 29d25c07..0fb4992e 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -13,10 +13,10 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Download Artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: path: artifacts @@ -57,7 +57,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up QEMU uses: docker/setup-qemu-action@v3 @@ -66,20 +66,22 @@ jobs: platforms: ${{ matrix.arch }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Build Docker image id: build - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v6 with: load: true push: false platforms: linux/${{ matrix.arch }} - tags: step-security/publish-unit-test-result-action:latest + tags: enricomi/publish-unit-test-result-action:latest outputs: type=docker + env: + DOCKER_BUILD_RECORD_UPLOAD: false - name: Download Artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: path: artifacts @@ -125,6 +127,7 @@ jobs: -e "INPUT_DEDUPLICATE_CLASSES_BY_FILE_NAME" \ -e "INPUT_LARGE_FILES" \ -e "INPUT_IGNORE_RUNS" \ + -e "INPUT_CHECK_RUN" \ -e "INPUT_JOB_SUMMARY" \ -e "INPUT_COMPARE_TO_EARLIER_COMMIT" \ -e "INPUT_PULL_REQUEST_BUILD" \ @@ -187,7 +190,7 @@ jobs: -v "/home/runner/work/_temp/_github_workflow":"/github/workflow" \ -v "/home/runner/work/_temp/_runner_file_commands":"/github/file_commands" \ -v "/home/runner/work/publish-unit-test-result-action/publish-unit-test-result-action":"$GITHUB_WORKSPACE" \ - step-security/publish-unit-test-result-action:latest + 
enricomi/publish-unit-test-result-action:latest shell: bash - name: JSON output @@ -198,23 +201,23 @@ jobs: - name: Scan for vulnerabilities id: scan - uses: crazy-max/ghaction-container-scan@v2 + uses: crazy-max/ghaction-container-scan@v3 with: - image: step-security/publish-unit-test-result-action:latest + image: enricomi/publish-unit-test-result-action:latest dockerfile: ./Dockerfile annotations: true - name: Upload SARIF artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: SARIF + name: SARIF ${{ matrix.arch }} path: ${{ steps.scan.outputs.sarif }} - name: Upload SARIF file if: always() && steps.scan.outputs.sarif != '' - uses: github/codeql-action/upload-sarif@v2 + uses: github/codeql-action/upload-sarif@v3 with: sarif_file: ${{ steps.scan.outputs.sarif }} - publish-composite: + publish-linux: name: Publish Test Results (${{ matrix.os-label }} python ${{ matrix.python }}) runs-on: ${{ matrix.os }} permissions: @@ -223,55 +226,248 @@ jobs: strategy: fail-fast: false - max-parallel: 3 + max-parallel: 2 matrix: # https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources - # test *-latest and newer (because newer eventually become 'latest' and should be tested to work before that) include: - - os: macos-latest - os-label: macOS - python: "3.8" - - os: macos-latest - os-label: macOS - python: "installed" - - os: ubuntu-latest os-label: Linux python: "3.8" + - os: ubuntu-latest + os-label: Linux + python: "venv" - os: ubuntu-latest os-label: Linux python: "installed" - os: ubuntu-20.04 os-label: Linux 20.04 python: "installed" + - os: ubuntu-24.04 + os-label: Linux 24.04 + python: "installed" + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Python + if: matrix.python != 'installed' && matrix.python != 'venv' + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python }} + + - name: Install virtualenv + if: 
matrix.python == 'venv' + run: python3 -m pip install virtualenv + shell: bash + + - name: Download Artifacts + uses: actions/download-artifact@v4 + with: + path: artifacts + + - name: Publish Test Results + id: test-results + uses: ./linux + with: + check_name: Test Results (${{ matrix.os-label }} python ${{ matrix.python }}) + files: artifacts/**/*.xml + json_file: "tests.json" + json_suite_details: true + json_test_case_results: true + report_suite_logs: "any" + + - name: JSON output + uses: ./misc/action/json-output + with: + json: '${{ steps.test-results.outputs.json }}' + json_file: 'tests.json' + + publish-macos: + name: Publish Test Results (${{ matrix.os-label }} python ${{ matrix.python }}) + runs-on: ${{ matrix.os }} + permissions: + checks: write + pull-requests: write + + strategy: + fail-fast: false + max-parallel: 2 + matrix: + # https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources + include: + - os: macos-13 + os-label: macOS + python: "3.8" + - os: macos-latest + os-label: macOS + python: "venv" + - os: macos-latest + os-label: macOS + python: "installed" + - os: macos-12 + os-label: macOS 12 + python: "installed" + - os: macos-13 + os-label: macOS 13 + python: "installed" + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Python + if: matrix.python != 'installed' && matrix.python != 'venv' + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python }} + + - name: Setup Python + if: matrix.python == 'venv' + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install virtualenv + if: matrix.python == 'venv' + run: python3 -m pip install virtualenv + shell: bash + + - name: Download Artifacts + uses: actions/download-artifact@v4 + with: + path: artifacts + + - name: Publish Test Results + id: test-results + uses: ./macos + with: + check_name: Test Results (${{ matrix.os-label }} python ${{ matrix.python }}) + 
files: artifacts/**/*.xml + json_file: "tests.json" + json_suite_details: true + json_test_case_results: true + report_suite_logs: "any" + + - name: JSON output + uses: ./misc/action/json-output + with: + json: '${{ steps.test-results.outputs.json }}' + json_file: 'tests.json' + + publish-windows: + name: Publish Test Results (${{ matrix.os-label }} python ${{ matrix.python }}) + runs-on: ${{ matrix.os }} + permissions: + checks: write + pull-requests: write + strategy: + fail-fast: false + max-parallel: 2 + matrix: + # https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources + include: - os: windows-latest os-label: Windows python: "installed" + - os: windows-latest + os-label: Windows + python: "venv" - os: windows-2019 os-label: Windows 2019 python: "installed" steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Python - if: matrix.python != 'installed' - uses: actions/setup-python@v4 + if: matrix.python != 'installed' && matrix.python != 'venv' + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} + - name: Install virtualenv + if: matrix.python == 'venv' + run: python3 -m pip install virtualenv + shell: bash + - name: Download Artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: path: artifacts - name: Publish Test Results id: test-results - uses: ./composite + uses: ./windows with: check_name: Test Results (${{ matrix.os-label }} python ${{ matrix.python }}) + files: artifacts\**\*.xml + json_file: "tests.json" + json_suite_details: true + json_test_case_results: true + report_suite_logs: "any" + + - name: JSON output + uses: ./misc/action/json-output + with: + json: '${{ steps.test-results.outputs.json }}' + json_file: 'tests.json' + + - name: Publish Test Results (Bash) + id: test-results-bash + if: always() && steps.test-results.outcome != 'skipped' + uses: ./windows/bash + with: + 
check_name: Test Results (${{ matrix.os-label }} bash python ${{ matrix.python }}) + files: artifacts\**\*.xml + json_file: "tests.json" + json_suite_details: true + json_test_case_results: true + report_suite_logs: "any" + + - name: JSON output (Bash) + uses: ./misc/action/json-output + with: + json: '${{ steps.test-results-bash.outputs.json }}' + json_file: 'tests.json' + + publish-composite: + name: Publish Test Results (${{ matrix.os-label }} composite) + runs-on: ${{ matrix.os }} + permissions: + checks: write + pull-requests: write + + strategy: + fail-fast: false + max-parallel: 1 + matrix: + # https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources + # test *-latest and newer (because newer eventually become 'latest' and should be tested to work before that) + include: + - os: macos-latest + os-label: macOS + - os: ubuntu-latest + os-label: Linux + - os: windows-latest + os-label: Windows + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Download Artifacts + uses: actions/download-artifact@v4 + with: + path: artifacts + + - name: Publish Test Results + id: test-results + uses: ./composite + with: + check_name: Test Results (${{ matrix.os-label }} composite python ${{ matrix.python }}) files: | artifacts/**/*.xml artifacts\**\*.xml @@ -295,7 +491,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Copy test result files run: cp -rv python/test/files test-files @@ -341,7 +537,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Copy test junit xml files run: cp -rv python/test/files/junit-xml test-files @@ -370,3 +566,4 @@ jobs: with: json: '${{ steps.test-results.outputs.json }}' json_file: 'tests.json' + \ No newline at end of file diff --git a/.github/workflows/test-os.yml b/.github/workflows/test-os.yml index 9f90a0c0..19b3af1b 100644 --- a/.github/workflows/test-os.yml 
+++ b/.github/workflows/test-os.yml @@ -6,6 +6,12 @@ on: os: required: true type: string + python-version: + required: true + type: string + include: + required: true + type: string jobs: test: name: Test (python-${{ matrix.python-version }}, ${{ matrix.os }}) @@ -14,15 +20,12 @@ jobs: fail-fast: false matrix: os: ${{ fromJson(inputs.os) }} - python-version: ["3.8", "3.9", "3.10", "3.11", "3.12.0-rc.3", "installed"] - - include: - - os: ${{ fromJson(inputs.os)[0] }} - python-version: "3.7" + python-version: ${{ fromJson(inputs.python-version) }} + include: ${{ fromJson(inputs.include) }} steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Test uses: ./.github/actions/test diff --git a/.github/workflows/test-results.yml b/.github/workflows/test-results.yml index c5b599e3..e8a4f7c9 100644 --- a/.github/workflows/test-results.yml +++ b/.github/workflows/test-results.yml @@ -18,7 +18,7 @@ jobs: steps: - name: Download and Extract Artifacts - uses: dawidd6/action-download-artifact@246dbf436b23d7c49e21a7ab8204ca9ecd1fe615 + uses: dawidd6/action-download-artifact@e7466d1a7587ed14867642c2ca74b5bcc1e19a2d with: run_id: ${{ github.event.workflow_run.id }} path: artifacts @@ -31,7 +31,7 @@ jobs: check_name: Test Results (reference) event_file: artifacts/Event File/event.json event_name: ${{ github.event.workflow_run.event }} - check_run_annotations_branch: "main, devel-1.0, devel-2.0" + check_run_annotations_branch: "master, master-1.x, devel-1.0, devel-2.0" files: "artifacts/**/*.xml" log_level: DEBUG diff --git a/Dockerfile b/Dockerfile index e70ac1d5..115d3c04 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,10 +5,12 @@ LABEL homepage="https://github.com/step-security/publish-unit-test-result-action LABEL com.github.actions.name="Publish Test Results" LABEL com.github.actions.description="A GitHub Action to publish test results." 
+LABEL com.github.actions.icon="check-circle" +LABEL com.github.actions.color="green" RUN apk add --no-cache --upgrade expat libuuid -COPY python/requirements.txt /action/ +COPY python/requirements-post-3.7.txt /action/requirements.txt RUN apk add --no-cache build-base libffi-dev; \ pip install --upgrade --force --no-cache-dir pip && \ pip install --upgrade --force --no-cache-dir -r /action/requirements.txt; \ diff --git a/README.md b/README.md index 248a5048..3755ecb9 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ publishes the results on GitHub. It supports [JSON (Dart, Mocha), TRX (MSTest, V and runs on Linux, macOS and Windows. You can use this action with ![Ubuntu Linux](misc/badge-ubuntu.svg) runners (e.g. `runs-on: ubuntu-latest`) -or ![ARM Linux](misc/badge-arm.svg) self-hosted runners: +or ![ARM Linux](misc/badge-arm.svg) self-hosted runners that support Docker: ```yaml - name: Publish Test Results @@ -22,12 +22,34 @@ or ![ARM Linux](misc/badge-arm.svg) self-hosted runners: See the [notes on running this action with absolute paths](#running-with-absolute-paths) if you cannot use relative test result file paths. -Use this for ![macOS](misc/badge-macos.svg) (e.g. `runs-on: macos-latest`) -and ![Windows](misc/badge-windows.svg) (e.g. `runs-on: windows-latest`) runners: +Use this for ![macOS](misc/badge-macos.svg) (e.g. `runs-on: macos-latest`) runners: +```yaml +- name: Publish Test Results + uses: step-security/publish-unit-test-result-action/macos@v2 + if: always() + with: + files: | + test-results/**/*.xml + test-results/**/*.trx + test-results/**/*.json +``` + +… and ![Windows](misc/badge-windows.svg) (e.g. 
`runs-on: windows-latest`) runners: +```yaml +- name: Publish Test Results + uses: step-security/publish-unit-test-result-action/windows@v2 + if: always() + with: + files: | + test-results\**\*.xml + test-results\**\*.trx + test-results\**\*.json +``` +For **self-hosted** Linux GitHub Actions runners **without Docker** installed, please use: ```yaml - name: Publish Test Results - uses: step-security/publish-unit-test-result-action/composite@v2 + uses: step-security/publish-unit-test-result-action/linux@v2 if: always() with: files: | @@ -36,7 +58,7 @@ and ![Windows](misc/badge-windows.svg) (e.g. `runs-on: windows-latest`) runners: test-results/**/*.json ``` -See the [notes on running this action as a composite action](#running-as-a-composite-action) if you run it on Windows or macOS. +See the [notes on running this action as a non-Docker action](#running-as-a-non-docker-action). If you see the `"Resource not accessible by integration"` error, you have to grant additional [permissions](#permissions), or [setup the support for pull requests from fork repositories and branches created by Dependabot](#support-fork-repositories-and-dependabot-branches). @@ -80,14 +102,59 @@ Check your favorite development and test environment for its JSON, TRX file or J |Test Environment |Language| JUnit
XML | NUnit
XML | XUnit
XML | TRX
file | JSON
file | |-----------------|:------:|:---------:|:---------:|:---------:|:---:|:---:| -|[Dart](https://github.com/dart-lang/test/blob/master/pkgs/test/doc/json_reporter.md)|Dart, Flutter| | | | | :heavy_check_mark: | -|[Jest](https://jestjs.io/docs/configuration#default-reporter)|JavaScript|:heavy_check_mark:| | | | | -|[Maven](https://maven.apache.org/surefire/maven-surefire-plugin/examples/junit.html)|Java, Scala, Kotlin|:heavy_check_mark:| | | | | -|[Mocha](https://mochajs.org/#xunit)|JavaScript|:heavy_check_mark:| |[not xunit](https://github.com/mochajs/mocha/issues/4758)| | :heavy_check_mark: | -|[MStest / dotnet](https://github.com/Microsoft/vstest-docs/blob/main/docs/report.md#syntax-of-default-loggers)|.Net|[:heavy_check_mark:](https://github.com/spekt/junit.testlogger#usage)|[:heavy_check_mark:](https://github.com/spekt/nunit.testlogger#usage)|[:heavy_check_mark:](https://github.com/spekt/xunit.testlogger#usage)|[:heavy_check_mark:](https://github.com/Microsoft/vstest-docs/blob/main/docs/report.md#syntax-of-default-loggers)| | -|[pytest](https://docs.pytest.org/en/latest/how-to/output.html#creating-junitxml-format-files)|Python|:heavy_check_mark:| | | | | -|[sbt](https://www.scala-sbt.org/release/docs/Testing.html#Test+Reports)|Scala|:heavy_check_mark:| | | | | -|Your favorite
environment|Your favorite
language|probably
:heavy_check_mark:| | | | | +|[Dart](https://github.com/dart-lang/test/blob/master/pkgs/test/doc/json_reporter.md)|Dart, Flutter| | | | |:white_check_mark:| +|[Jest](https://jestjs.io/docs/configuration#default-reporter)|JavaScript|:white_check_mark:| | | | | +|[Maven](https://maven.apache.org/surefire/maven-surefire-plugin/examples/junit.html)|Java, Scala, Kotlin|:white_check_mark:| | | | | +|[Mocha](https://mochajs.org/#xunit)|JavaScript|:white_check_mark:| |[not xunit](https://github.com/mochajs/mocha/issues/4758)| |:white_check_mark:| +|[MStest / dotnet](https://github.com/Microsoft/vstest-docs/blob/main/docs/report.md#syntax-of-default-loggers)|.Net|[:white_check_mark:](https://github.com/spekt/junit.testlogger#usage)|[:white_check_mark:](https://github.com/spekt/nunit.testlogger#usage)|[:white_check_mark:](https://github.com/spekt/xunit.testlogger#usage)|[:white_check_mark:](https://github.com/Microsoft/vstest-docs/blob/main/docs/report.md#syntax-of-default-loggers)| | +|[pytest](https://docs.pytest.org/en/latest/how-to/output.html#creating-junitxml-format-files)|Python|:white_check_mark:| | | | | +|[sbt](https://www.scala-sbt.org/release/docs/Testing.html#Test+Reports)|Scala|:white_check_mark:| | | | | +|Your favorite
environment|Your favorite
language|probably
:white_check_mark:| | | | | + +## What is new in version 2 + +
+These changes have to be considered when moving from version 1 to version 2: + +### Default value for `check_name` changed +Unless `check_name` is set in your config, the check name used to publish test results changes from `"Unit Test Results"` to `"Test Results"`. + +**Impact:** +The check with the old name will not be updated once moved to version 2. + +**Workaround to get version 1 behaviour:** +Add `check_name: "Unit Test Results"` to your config. + +### Default value for `comment_title` changed +Unless `comment_title` or `check_name` are set in your config, the title used to comment on open pull requests changes from `"Unit Test Results"` to `"Test Results"`. + +**Impact:** +Existing comments with the old title will not be updated once moved to version 2, but a new comment is created. + +**Workaround to get version 1 behaviour:** +See workaround for `check_name`. + +### Modes `create new` and `update last` removed for option `comment_mode` +The action always updates an earlier pull request comment, which is the exact behaviour of mode `update last`. +The [configuration](#configuration) options `create new` and `update last` are therefore removed. + +**Impact:** +An existing pull request comment is always updated. + +**Workaround to get version 1 behaviour:** +Not supported. + +### Option `hiding_comments` removed +The action always updates an earlier pull request comment, so hiding comments is not required anymore. + +### Option `comment_on_pr` removed +Option `comment_on_pr` has been removed. + +**Workaround to get version 1 behaviour:** +Set `comment_mode` to `always` (the default) or `off`. + +
+ ## Publishing test results @@ -126,6 +193,8 @@ Those are highlighted in pull request comments to easily spot unintended test re ***Note:** This requires `check_run_annotations` to be set to `all tests, skipped tests`.* +Comments can be disabled with `comment_mode: off`. + ### Commit and pull request checks The checks section of a commit and related pull requests list a short summary (here `1 fail, 1 skipped, …`), @@ -139,6 +208,8 @@ Pull request checks: ![pull request checks example](misc/github-pull-request-checks.png) +Check runs can be disabled with `check_run: false`. + ### Commit and pull request annotations Each failing test produces an annotation with failure details in the checks section of a commit: @@ -154,6 +225,8 @@ Use option `test_file_prefix` to add a prefix to, or remove a prefix from these ***Note:** Only the first failure of a test is shown. If you want to see all failures, set `report_individual_runs: "true"`.* +Check run annotations can be disabled with `ignore_runs: true`. + ### GitHub Actions job summary The results are added to the job summary page of the workflow that runs this action: @@ -164,12 +237,16 @@ In presence of failures or errors, the job summary links to the respective [chec ***Note:** Job summary requires [GitHub Actions runner v2.288.0](https://github.com/actions/runner/releases/tag/v2.288.0) or above.* +Job summaries can be disabled with `job_summary: false`. + ### GitHub Actions check summary of a commit Test results are published in the GitHub Actions check summary of the respective commit: ![checks comment example](misc/github-checks-comment.png) +Check runs can be disabled with `check_run: false`. 
+ ## The symbols [comment]: <> (This heading is linked to from method get_link_and_tooltip_label_md) @@ -177,7 +254,7 @@ The symbols have the following meaning: |Symbol|Meaning| |:----:|-------| -||A successful test or run| +|  :white_check_mark:|A successful test or run| ||A skipped test or run| ||A failed test or run| ||An erroneous test or run| @@ -204,7 +281,7 @@ The list of most notable options: |Option|Default Value|Description| |:-----|:-----:|:----------| -|`files`|_no default_|File patterns of test result files. Relative paths are known to work best, while the composite action [also works with absolute paths](#running-with-absolute-paths). Supports `*`, `**`, `?`, and `[]` character ranges. Use multiline string for multiple patterns. Patterns starting with `!` exclude the matching files. There have to be at least one pattern starting without a `!`.| +|`files`|_no default_|File patterns of test result files. Relative paths are known to work best, while the non-Docker action [also works with absolute paths](#running-with-absolute-paths). Supports `*`, `**`, `?`, and `[]` character ranges. Use multiline string for multiple patterns. Patterns starting with `!` exclude the matching files. There have to be at least one pattern starting without a `!`.| |`check_name`|`"Test Results"`|An alternative name for the check result. Required to be unique for each instance in one workflow.| |`comment_title`|same as `check_name`|An alternative name for the pull request comment.| |`comment_mode`|`always`|The action posts comments to pull requests that are associated with the commit. Set to:
`always` - always comment
`changes` - comment when changes w.r.t. the target branch exist
`changes in failures` - when changes in the number of failures and errors exist
`changes in errors` - when changes in the number of (only) errors exist
`failures` - when failures or errors exist
`errors` - when (only) errors exist
`off` - to not create pull request comments.| @@ -236,6 +313,7 @@ The list of most notable options: |:-----|:-----:|:----------| |`time_unit`|`seconds`|Time values in the test result files have this unit. Supports `seconds` and `milliseconds`.| |`test_file_prefix`|`none`|Paths in the test result files should be relative to the git repository for annotations to work best. This prefix is added to (if starting with "+"), or remove from (if starting with "-") test file paths. Examples: "+src/" or "-/opt/actions-runner".| +|`check_run`|`true`|Set to `true`, the results are published as a check run, but it may not be associated with the workflow that ran this action.| |`job_summary`|`true`|Set to `true`, the results are published as part of the [job summary page](https://github.blog/2022-05-09-supercharging-github-actions-with-job-summaries/) of the workflow run.| |`compare_to_earlier_commit`|`true`|Test results are compared to results of earlier commits to show changes:
`false` - disable comparison, `true` - compare across commits.'| |`test_changes_limit`|`10`|Limits the number of removed or skipped tests reported on pull request comments. This report can be disabled with a value of `0`.| @@ -285,7 +363,7 @@ Here is an example JSON: ```json { "title": "4 parse errors, 4 errors, 23 fail, 18 skipped, 227 pass in 39m 12s", - "summary": "  24 files  ±0      4 errors  21 suites  ±0   39m 12s [:stopwatch:](https://github.com/step-security/publish-unit-test-result-action \"duration of all tests\") ±0s\n272 tests ±0  227 [:heavy_check_mark:](https://github.com/step-security/publish-unit-test-result-action \"passed tests\") ±0  18 [:zzz:](https://github.com/step-security/publish-unit-test-result-action \"skipped / disabled tests\") ±0  23 [:x:](https://github.com/step-security/publish-unit-test-result-action \"failed tests\") ±0  4 [:fire:](https://github.com/step-security/publish-unit-test-result-action \"test errors\") ±0 \n437 runs  ±0  354 [:heavy_check_mark:](https://github.com/step-security/publish-unit-test-result-action \"passed tests\") ±0  53 [:zzz:](https://github.com/step-security/publish-unit-test-result-action \"skipped / disabled tests\") ±0  25 [:x:](https://github.com/step-security/publish-unit-test-result-action \"failed tests\") ±0  5 [:fire:](https://github.com/step-security/publish-unit-test-result-action \"test errors\") ±0 \n\nResults for commit 11c02e56. 
± Comparison against earlier commit d8ce4b6c.\n", + "summary": "  24 files  ±0      4 errors  21 suites  ±0   39m 12s [:stopwatch:](https://github.com/step-security/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"duration of all tests\") ±0s\n272 tests ±0  227 [:white_check_mark:](https://github.com/step-security/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"passed tests\") ±0  18 [:zzz:](https://github.com/step-security/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"skipped / disabled tests\") ±0  23 [:x:](https://github.com/step-security/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"failed tests\") ±0  4 [:fire:](https://github.com/step-security/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"test errors\") ±0 \n437 runs  ±0  354 [:white_check_mark:](https://github.com/step-security/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"passed tests\") ±0  53 [:zzz:](https://github.com/step-security/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"skipped / disabled tests\") ±0  25 [:x:](https://github.com/step-security/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"failed tests\") ±0  5 [:fire:](https://github.com/step-security/publish-unit-test-result-action/blob/v2.6.1/README.md#the-symbols \"test errors\") ±0 \n\nResults for commit 11c02e56. 
± Comparison against earlier commit d8ce4b6c.\n", "conclusion": "success", "stats": { "files": 24, @@ -314,6 +392,7 @@ Here is an example JSON: "reference_type": "earlier", "reference_commit": "d8ce4b6c62ebfafe1890c55bf7ea30058ebf77f2" }, + "check_url": "https://github.com/step-security/publish-unit-test-result-action/runs/5397876970", "formatted": { "stats": { "duration": "2 352", @@ -441,10 +520,10 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} @@ -453,7 +532,7 @@ jobs: - name: Upload Test Results if: always() - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Test Results (Python ${{ matrix.python-version }}) path: pytest.xml @@ -477,7 +556,7 @@ jobs: steps: - name: Download Artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: path: artifacts @@ -518,7 +597,7 @@ event_file: runs-on: ubuntu-latest steps: - name: Upload - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Event File path: ${{ github.event_path }} @@ -530,7 +609,7 @@ Adjust the value of `path` to fit your setup: ```yaml - name: Upload Test Results if: always() - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Test Results path: | @@ -588,7 +667,7 @@ jobs: steps: - name: Download and Extract Artifacts - uses: dawidd6/action-download-artifact@246dbf436b23d7c49e21a7ab8204ca9ecd1fe615 + uses: dawidd6/action-download-artifact@e7466d1a7587ed14867642c2ca74b5bcc1e19a2d with: run_id: ${{ github.event.workflow_run.id }} path: artifacts @@ -651,6 +730,9 @@ Disabling the pull request comment mode (`"off"`) for events other than `pull_re ## Create a badge from test results +Here is an example how to use the [JSON](#json-result) output of this action to create a 
badge like this: +[![Test Results](https://gist.githubusercontent.com/EnricoMi/612cb538c14731f1a8fefe504f519395/raw/badge.svg)](https://gist.githubusercontent.com/EnricoMi/612cb538c14731f1a8fefe504f519395/raw/badge.svg) +
Example workflow YAML @@ -680,7 +762,7 @@ steps: esac - name: Create badge - uses: emibcn/badge-action@d6f51ff11b5c3382b3b88689ae2d6db22d9737d1 + uses: emibcn/badge-action@808173dd03e2f30c980d03ee49e181626088eee8 with: label: Tests status: '${{ fromJSON( steps.test-results.outputs.json ).formatted.stats.tests }} tests, ${{ fromJSON( steps.test-results.outputs.json ).formatted.stats.runs }} runs: ${{ fromJSON( steps.test-results.outputs.json ).conclusion }}' @@ -692,7 +774,7 @@ steps: if: > github.event_name == 'workflow_run' && github.event.workflow_run.head_branch == 'master' || github.event_name != 'workflow_run' && github.ref == 'refs/heads/master' - uses: andymckay/append-gist-action@1fbfbbce708a39bd45846f0955ed5521f2099c6d + uses: andymckay/append-gist-action@6e8d64427fe47cbacf4ab6b890411f1d67c07f3e with: token: ${{ secrets.GIST_TOKEN }} gistURL: https://gist.githubusercontent.com/{user}/{id} @@ -709,10 +791,14 @@ You can then use the badge via this URL: https://gist.githubusercontent.com/{use ## Running with absolute paths It is known that this action works best with relative paths (e.g. `test-results/**/*.xml`), -but most absolute paths (e.g. `/tmp/test-results/**/*.xml`) require to use the composite variant -of this action (`uses: step-security/publish-unit-test-result-action/composite@v2`). +but most absolute paths (e.g. 
`/tmp/test-results/**/*.xml`) require to use the non-Docker variant +of this action: + + uses: step-security/publish-unit-test-result-action/linux@v2 + uses: step-security/publish-unit-test-result-action/macos@v2 + uses: step-security/publish-unit-test-result-action/windows@v2 -If you have to use absolute paths with the non-composite variant of this action (`uses: step-security/publish-unit-test-result-action@v2`), +If you have to use absolute paths with the Docker variant of this action (`uses: step-security/publish-unit-test-result-action@v2`), you have to copy files to a relative path first, and then use the relative path: ```yaml @@ -732,24 +818,41 @@ you have to copy files to a relative path first, and then use the relative path: test-results/**/*.json ``` -Using the non-composite variant of this action is recommended as it starts up much quicker. +Using the Docker variant of this action is recommended as it starts up much quicker. -## Running as a composite action +## Running as a non-Docker action -Running this action as a composite action allows to run it on various operating systems as it -does not require Docker. The composite action, however, requires a Python3 environment to be setup -on the action runner. All GitHub-hosted runners (Ubuntu, Windows Server and macOS) provide a suitable -Python3 environment out-of-the-box. +Running this action as below allows to run it on action runners that do not provide Docker: + + uses: step-security/publish-unit-test-result-action/linux@v2 + uses: step-security/publish-unit-test-result-action/macos@v2 + uses: step-security/publish-unit-test-result-action/windows@v2 + +These actions, however, require a Python3 environment to be setup on the action runner. +All GitHub-hosted runners (Ubuntu, Windows Server and macOS) provide a suitable Python3 environment out-of-the-box. 
Self-hosted runners may require setting up a Python environment first: ```yaml - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: 3.8 ``` -Self-hosted runners for Windows require Bash shell to be installed. Easiest way to have one is by installing -Git for Windows, which comes with Git BASH. Make sure that the location of `bash.exe` is part of the `PATH` -environment variable seen by the self-hosted runner. +Start-up of the action is faster with `virtualenv` or `venv` package installed. + +## Running as a composite action + +Running this action via: + + uses: step-security/publish-unit-test-result-action/composite@v2 + +is **deprecated**, please use an action appropriate for your operating system and shell: + +- Linux (Bash shell): `uses: step-security/publish-unit-test-result-action/linux@v2` +- macOS (Bash shell): `uses: step-security/publish-unit-test-result-action/macos@v2` +- Windows (PowerShell): `uses: step-security/publish-unit-test-result-action/windows@v2` +- Windows (Bash shell): `uses: step-security/publish-unit-test-result-action/windows/bash@v2` + +These are non-Docker variations of this action. For details, see section ["Running as a non-Docker action"](#running-as-a-non-docker-action) above. diff --git a/action.yml b/action.yml index 5181086e..7eab1b77 100644 --- a/action.yml +++ b/action.yml @@ -1,4 +1,5 @@ name: 'Publish Test Results' + description: 'Publishes JUnit, NUnit, XUnit, TRX, JSON test results on GitHub for .NET, Dart, Java, JS, Jest, Mocha, Python, Scala, …' inputs: @@ -41,7 +42,7 @@ inputs: default: 'false' required: false files: - description: 'File patterns of test result files. Relative paths are known to work best, while the composite action also works with absolute paths. Supports "*", "**", "?", and "[]" character ranges. Use multiline string for multiple patterns. Patterns starting with "!" exclude the matching files. 
There have to be at least one pattern starting without a "!".' + description: 'File patterns of test result files. Relative paths are known to work best, while the non-Docker action also works with absolute paths. Supports "*", "**", "?", and "[]" character ranges. Use multiline string for multiple patterns. Patterns starting with "!" exclude the matching files. There has to be at least one pattern starting without a "!".' required: false junit_files: description: 'Deprecated, use "files" option instead.' @@ -79,6 +80,10 @@ inputs: description: 'Does not collect test run information from the test result files, which is useful for very large files. This disables any check run annotations.' default: 'false' required: false + check_run: + description: 'Set to "true", the results are published as a check run, but it may not be associated with the workflow that ran this action.' + default: 'true' + required: false job_summary: description: 'Set to "true", the results are published as part of the job summary page of the workflow run.' default: 'true' required: false @@ -137,14 +142,15 @@ inputs: description: 'Prior to v2.6.0, the action used the "/search/issues" REST API to find pull requests related to a commit. If you need to restore that behaviour, set this to "true". Defaults to "false".'
default: 'false' required: false + outputs: json: description: "Test results as JSON" runs: using: 'docker' - image: 'docker://ghcr.io/step-security/publish-unit-test-result-action:v1.0.3' + image: 'docker://ghcr.io/step-security/publish-unit-test-result-action:v2.17.0' branding: - icon: 'check-square' - color: 'green' \ No newline at end of file + icon: 'check-circle' + color: 'green' diff --git a/composite/action.yml b/composite/action.yml index 50608521..9136f62a 100644 --- a/composite/action.yml +++ b/composite/action.yml @@ -1,4 +1,5 @@ name: 'Publish Test Results' + description: 'Publishes JUnit, NUnit, XUnit, TRX, JSON test results on GitHub for .NET, Dart, Java, JS, Jest, Mocha, Python, Scala, …' inputs: @@ -41,7 +42,7 @@ inputs: default: 'false' required: false files: - description: 'File patterns of test result files. Relative paths are known to work best, while the composite action also works with absolute paths. Supports "*", "**", "?", and "[]" character ranges. Use multiline string for multiple patterns. Patterns starting with "!" exclude the matching files. There have to be at least one pattern starting without a "!".' + description: 'File patterns of test result files. Relative paths are known to work best, while the non-Docker action also works with absolute paths. Supports "*", "**", "?", and "[]" character ranges. Use multiline string for multiple patterns. Patterns starting with "!" exclude the matching files. There has to be at least one pattern starting without a "!".' required: false junit_files: description: 'Deprecated, use "files" option instead.' @@ -79,6 +80,10 @@ inputs: description: 'Does not collect test run information from the test result files, which is useful for very large files. This disables any check run annotations.' default: 'false' required: false + check_run: + description: 'Set to "true", the results are published as a check run, but it may not be associated with the workflow that ran this action.' 
+ default: 'true' + required: false job_summary: description: 'Set to "true", the results are published as part of the job summary page of the workflow run.' default: 'true' required: false @@ -146,76 +151,75 @@ runs: using: 'composite' steps: - - name: Validate subscription - shell: bash + - name: Deprecation warning run: | - API_URL="https://agent.api.stepsecurity.io/v1/github/$GITHUB_REPOSITORY/actions/subscription" - - # Set a timeout for the curl command (3 seconds) - RESPONSE=$(curl --max-time 3 -s -w "%{http_code}" "$API_URL" -o /dev/null) || true - CURL_EXIT_CODE=${?} + # Print deprecation warning + echo "::warning::Running this action via 'uses: step-security/publish-unit-test-result-action/composite@v2' is deprecated! For details, see: https://github.com/step-security/publish-unit-test-result-action/tree/v2#running-as-a-composite-action" + shell: bash

 # Check if the response code is not 200 - if [ $CURL_EXIT_CODE -ne 0 ] || [ "$RESPONSE" != "200" ]; then - if [ -z "$RESPONSE" ] || [ "$RESPONSE" == "000" ] || [ $CURL_EXIT_CODE -ne 0 ]; then - echo "Timeout or API not reachable. Continuing to next step." - else - echo "Subscription is not valid. Reach out to support@stepsecurity.io" - exit 1 - fi - fi - name: Check for Python3 id: python run: | - echo '##[group]Check for Python3' + # Check for Python3 + echo '::group::Check for Python3' + # we check version here just to execute `python3` with an argument # on Windows, there is a `python3.exe` that is a proxy to trigger installation from app store # command `which python3` finds that, but `python3 -V` does not return the version on stdout - if ! which python3 || [[ $(python3 -V) != *"python 3."* && $(python3 -V) != *"Python 3."* ]] + if ! which python3 || [[ "$(python3 -V)" != *"python 3."* && "$(python3 -V)" != *"Python 3."* ]] then - if ! which python || [[ $(python -V) != *"python 3."* && $(python -V) != *"Python 3."* ]] + if ! 
which python || [[ "$(python -V)" != *"python 3."* && "$(python -V)" != *"Python 3."* ]] then echo "::error::No python3 interpreter found. Please setup python before running this action. You could use https://github.com/actions/setup-python." exit 1 fi - interpreter="$(which python)" - if [[ ! -e "${interpreter}3" ]] - then - mkdir -p "$RUNNER_TEMP/bin/" - ln -s "$interpreter" "$RUNNER_TEMP/bin/python3" - echo "$RUNNER_TEMP/bin" >> $GITHUB_PATH - fi + PYTHON_BIN="$(python -c 'import sys; print(sys.executable)')" + else + PYTHON_BIN="$(python3 -c 'import sys; print(sys.executable)')" fi - echo "version=$(python3 -V)" >> $GITHUB_OUTPUT - echo '##[endgroup]' + + echo "Python that creates venv: $PYTHON_BIN" + echo "PYTHON_BIN=$PYTHON_BIN" >> "$GITHUB_ENV" + + PYTHON_VERSION="$($PYTHON_BIN -c 'import sys; print(f"{sys.version_info.major}.{sys.version_info.minor}")')" + if [[ "$PYTHON_VERSION" == "3.7" ]] + then + echo "DEPENDENCIES_VERSION=3.7" >> "$GITHUB_ENV" + else + echo "DEPENDENCIES_VERSION=post-3.7" >> "$GITHUB_ENV" + fi + echo "version=$PYTHON_VERSION" >> "$GITHUB_OUTPUT" + + echo '::endgroup::' shell: bash - name: Detect OS id: os run: | + # Detect OS case "$RUNNER_OS" in Linux*) - echo "pip-cache=~/.cache/pip" >> $GITHUB_OUTPUT + echo "pip-cache=~/.cache/pip" >> "$GITHUB_OUTPUT" ;; macOS*) echo "pip-cache=~/Library/Caches/pip" >> $GITHUB_OUTPUT echo "pip-options=--break-system-packages" >> $GITHUB_OUTPUT ;; Windows*) - echo "pip-cache=~\\AppData\\Local\\pip\\Cache" >> $GITHUB_OUTPUT - echo "pip-options=--user" >> $GITHUB_OUTPUT + echo "pip-cache=~\\AppData\\Local\\pip\\Cache" >> "$GITHUB_OUTPUT" + echo "pip-options=--user" >> "$GITHUB_OUTPUT" ;; esac shell: bash - name: Restore PIP packages cache - uses: actions/cache/restore@v3 + uses: actions/cache/restore@v4 id: cache continue-on-error: true with: path: ${{ steps.os.outputs.pip-cache }} - key: step-security-publish-action-${{ runner.os }}-${{ runner.arch }}-pip-${{ 
steps.python.outputs.version }}-df386fe4e04a72c96e140f0566a5c849 + key: step-security-publish-action-${{ runner.os }}-${{ runner.arch }}-pip-${{ steps.python.outputs.version }}-e594996205319a7990b3a4ec677d10a3 - name: Create virtualenv id: venv @@ -223,57 +227,61 @@ runs: env: PIP_OPTIONS: ${{ steps.os.outputs.pip-options }} run: | - echo '##[group]Create virtualenv' - # install virtualenv, if it is not yet installed - python3 -m pip install $PIP_OPTIONS virtualenv - python3 -m virtualenv step-security-publish-action-venv - # test activating virtualenv + # Create virtualenv + echo '::group::Create virtualenv' + + echo "Python that creates venv: $PYTHON_BIN" + + echo "Creating virtual environment" + if ! "$PYTHON_BIN" -m virtualenv step-security-publish-action-venv && ! "$PYTHON_BIN" -m venv step-security-publish-action-venv + then + echo "Looks like there is neither virtualenv nor venv package installed" + if ! "$PYTHON_BIN" -m pip install $PIP_OPTIONS virtualenv && [ -n "$PIP_OPTIONS" ] + then + echo "Installing virtualenv package with PIP options '$PIP_OPTIONS' failed, now trying without" + if ! "$PYTHON_BIN" -m pip install virtualenv + then + echo "::error::Installing virtualenv package failed" + exit 1 + fi + fi + + if ! 
"$PYTHON_BIN" -m virtualenv step-security-publish-action-venv + then + echo "::error::Cannot create venv after installing virtualenv package" + exit 1 + fi + fi + + echo "Finding Python interpreter in venv" case "$RUNNER_OS" in Linux*|macOS*) - source step-security-publish-action-venv/bin/activate;; + PYTHON_VENV="step-security-publish-action-venv/bin/python";; Windows*) - source step-security-publish-action-venv\\Scripts\\activate;; + PYTHON_VENV="step-security-publish-action-venv\\Scripts\\python";; esac - which python3 - echo '##[endgroup]' + PYTHON_VENV="$("$PYTHON_VENV" -c 'import sys; print(sys.executable)')" + echo "Python in venv: $PYTHON_VENV" + echo "PYTHON_VENV=$PYTHON_VENV" >> "$GITHUB_ENV" + + echo '::endgroup::' shell: bash - name: Install Python dependencies - env: - PIP_OPTIONS: ${{ steps.os.outputs.pip-options }} run: | - echo '##[group]Install Python dependencies' - if [ "${{ steps.venv.outcome }}" == "success" ] - then - # activate virtualenv - case "$RUNNER_OS" in - Linux*|macOS*) - source step-security-publish-action-venv/bin/activate;; - Windows*) - source step-security-publish-action-venv\\Scripts\\activate;; - esac - fi - which python3 - - # make sure wheel is installed, which improves installing our dependencies - python3 -m pip install $PIP_OPTIONS wheel - python3 -m pip install $PIP_OPTIONS -r $GITHUB_ACTION_PATH/../python/requirements.txt - echo '##[endgroup]' + # Install Python dependencies + echo '::group::Install Python dependencies' + "$PYTHON_VENV" -m pip install -r "$GITHUB_ACTION_PATH/../python/requirements-$DEPENDENCIES_VERSION.txt" + echo '::endgroup::' shell: bash - name: Publish Test Results id: test-results run: | - echo '##[group]Publish Test Results' - # activate virtualenv - case "$RUNNER_OS" in - Linux*|macOS*) - source step-security-publish-action-venv/bin/activate;; - Windows*) - source step-security-publish-action-venv\\Scripts\\activate;; - esac - python3 $GITHUB_ACTION_PATH/../python/publish_test_results.py - echo 
'##[endgroup]' + # Publish Test Results + echo '::group::Publish Test Results' + "$PYTHON_VENV" "$GITHUB_ACTION_PATH/../python/publish_test_results.py" + echo '::endgroup::' env: GITHUB_TOKEN: ${{ inputs.github_token }} GITHUB_TOKEN_ACTOR: ${{ inputs.github_token_actor }} @@ -311,6 +319,7 @@ runs: JSON_THOUSANDS_SEPARATOR: ${{ inputs.json_thousands_separator }} JSON_SUITE_DETAILS: ${{ inputs.json_suite_details }} JSON_TEST_CASE_RESULTS: ${{ inputs.json_test_case_results }} + CHECK_RUN: ${{ inputs.check_run }} JOB_SUMMARY: ${{ inputs.job_summary }} SEARCH_PULL_REQUESTS: ${{ inputs.search_pull_requests }} # not documented @@ -320,7 +329,7 @@ runs: shell: bash - name: Save PIP packages cache - uses: actions/cache/save@v3 + uses: actions/cache/save@v4 if: ( success() || failure() ) && ! steps.cache.outputs.cache-hit continue-on-error: true with: @@ -328,5 +337,5 @@ runs: key: ${{ steps.cache.outputs.cache-primary-key }} branding: - icon: 'check-square' + icon: 'check-circle' color: 'green' diff --git a/linux/action.yml b/linux/action.yml new file mode 100644 index 00000000..71710bd6 --- /dev/null +++ b/linux/action.yml @@ -0,0 +1,305 @@ +name: 'Publish Test Results' + +description: 'Publishes JUnit, NUnit, XUnit, TRX, JSON test results on GitHub for .NET, Dart, Java, JS, Jest, Mocha, Python, Scala, …' + +inputs: + github_token: + description: 'GitHub API Access Token.' + default: ${{ github.token }} + required: false + github_token_actor: + description: 'The name of the GitHub app that owns the GitHub API Access Token (see github_token). Used to identify pull request comments created by this action during earlier runs. Has to be set when `github_token` is set to a GitHub app installation token (other than GitHub actions). Otherwise, existing comments will not be updated, but new comments created. Note: this does not change the bot name of the pull request comments. Defaults to "github-actions".' 
+ default: 'github-actions' + required: false + github_retries: + description: 'Requests to the GitHub API are retried this number of times. The value must be a positive integer or zero.' + default: '10' + required: false + commit: + description: 'Commit SHA to which test results are published. Only needed if the value of GITHUB_SHA does not work for you.' + required: false + check_name: + description: 'Name of the created check run.' + default: 'Test Results' + required: false + comment_title: + description: 'An alternative title for the pull request comment. Defaults to value of check_name input.' + required: false + comment_mode: + description: 'The action posts comments to pull requests that are associated with the commit. Set to "always" - always comment, "changes" - comment when changes w.r.t. the target branch exist, "changes in failures" - when changes in the number of failures and errors exist, "changes in errors" - when changes in the number of (only) errors exist, "failures" - when failures or errors exist, "errors" - when (only) errors exist, "off" - to not create pull request comments.' + default: 'always' + required: false + fail_on: + description: 'The created test result check run has failure state if any test fails or test errors occur. Never fails when set to "nothing", fails only on errors when set to "errors". Default is "test failures".' + default: 'test failures' + required: false + action_fail: + description: 'When set "true", the action itself fails when tests have failed (see option fail_on).' + default: 'false' + required: false + action_fail_on_inconclusive: + description: 'When set "true", the action itself fails when tests are inconclusive (no test results).' + default: 'false' + required: false + files: + description: 'File patterns of test result files. Relative paths are known to work best, while the non-Docker action also works with absolute paths. Supports "*", "**", "?", and "[]" character ranges. 
Use multiline string for multiple patterns. Patterns starting with "!" exclude the matching files. There has to be at least one pattern starting without a "!".' + required: false + junit_files: + description: 'Deprecated, use "files" option instead.' + required: false + nunit_files: + description: 'Deprecated, use "files" option instead.' + required: false + xunit_files: + description: 'Deprecated, use "files" option instead.' + required: false + trx_files: + description: 'Deprecated, use "files" option instead.' + required: false + time_unit: + description: 'Time values in the test result files have this unit. Supports "seconds" and "milliseconds".' + default: 'seconds' + required: false + test_file_prefix: + description: 'Paths in the test result files should be relative to the git repository for annotations to work best. This prefix is added to (if starting with "+"), or removed from (if starting with "-") test file paths. Examples: "+src/" or "-/opt/actions-runner".' + required: false + report_individual_runs: + description: 'Individual runs of the same test may see different failures. Reports all individual failures when set "true" or the first only otherwise.' + required: false + report_suite_logs: + description: 'In addition to reporting regular test logs, also report test suite logs. These are logs provided on suite level, not individual test level. Set to "info" for normal output, "error" for error output, "any" for both, or "none" for no suite logs at all. Defaults to "none".' + default: 'none' + required: false + deduplicate_classes_by_file_name: + description: 'De-duplicates classes with same name by their file name when set "true", combines test results for those classes otherwise.' + required: false + large_files: + description: 'Support for large files is enabled when set to "true". Defaults to "false", unless ignore_runs is "true".'
+ required: false + ignore_runs: + description: 'Does not collect test run information from the test result files, which is useful for very large files. This disables any check run annotations.' + default: 'false' + required: false + check_run: + description: 'Set to "true", the results are published as a check run, but it may not be associated with the workflow that ran this action.' + default: 'true' + required: false + job_summary: + description: 'Set to "true", the results are published as part of the job summary page of the workflow run.' + default: 'true' + required: false + compare_to_earlier_commit: + description: 'Test results are compared to results of earlier commits to highlight changes: "false" - disable comparison, "true" - compare across commits.' + default: 'true' + required: false + pull_request_build: + description: 'As part of pull requests, GitHub builds a merge commit, which combines the commit and the target branch. If tests ran on the actual pushed commit, then set this to "commit". Defaults to "merge".' + default: 'merge' + required: false + event_file: + description: 'An alternative event file to use. Useful to replace a "workflow_run" event file with the actual source event file.' + required: false + event_name: + description: 'An alternative event name to use. Useful to replace a "workflow_run" event name with the actual source event name: github.event.workflow_run.event.' + required: false + test_changes_limit: + description: 'Limits the number of removed or skipped tests reported on pull request comments. This report can be disabled with a value of 0. The default is 10.' + required: false + check_run_annotations: + description: 'Adds additional information to the check run. This is a comma-separated list of any of the following values: "all tests" - list all found tests, "skipped tests" - list all skipped tests. Set to "none" to add no extra annotations at all.' 
+ default: 'all tests, skipped tests' + required: false + check_run_annotations_branch: + description: 'Adds check run annotations only on given branches. Comma-separated list of branch names allowed, asterisk "*" matches all branches. Defaults to event.repository.default_branch or "main, master".' + required: false + seconds_between_github_reads: + description: 'Sets the number of seconds the action waits between concurrent read requests to the GitHub API. This throttles the API usage to avoid abuse rate limits: https://docs.github.com/en/rest/overview/resources-in-the-rest-api#abuse-rate-limits.' + default: '0.25' + required: false + seconds_between_github_writes: + description: 'Sets the number of seconds the action waits between concurrent write requests to the GitHub API. This throttles the API usage to avoid abuse rate limits: https://docs.github.com/en/rest/overview/resources-in-the-rest-api#abuse-rate-limits.' + default: '2.0' + required: false + secondary_rate_limit_wait_seconds: + description: 'Sets the number of seconds to wait before retrying secondary rate limit errors. If not set, the default defined in the PyGithub library is used (currently 60 seconds).' + required: false + json_file: + description: 'Results are written to this JSON file.' + required: false + json_thousands_separator: + description: 'Formatted numbers in JSON use this character to separate groups of thousands. Common values are "," or ".". Defaults to punctuation space (\u2008).' + default: ' ' + required: false + json_suite_details: + description: 'Write out all suite details to the JSON file. Setting this to "true" can greatly increase the size of the output. Defaults to "false".' + default: 'false' + required: false + json_test_case_results: + description: 'Write out all individual test case results to the JSON file. Setting this to "true" can greatly increase the size of the output. Defaults to "false".' 
+ default: 'false' + required: false + search_pull_requests: + description: 'Prior to v2.6.0, the action used the "/search/issues" REST API to find pull requests related to a commit. If you need to restore that behaviour, set this to "true". Defaults to "false".' + default: 'false' + required: false + +outputs: + json: + description: "Test results as JSON" + value: ${{ steps.test-results.outputs.json }} + +runs: + using: 'composite' + steps: + - name: Check for Python3 + id: python + run: | + # Check for Python3 + echo '::group::Check for Python3' + + # we check version here just to execute `python3` with an argument + # on Windows, there is a `python3.exe` that is a proxy to trigger installation from app store + # command `which python3` finds that, but `python3 -V` does not return the version on stdout + if ! which python3 || [[ "$(python3 -V)" != *"python 3."* && "$(python3 -V)" != *"Python 3."* ]] + then + if ! which python || [[ "$(python -V)" != *"python 3."* && "$(python -V)" != *"Python 3."* ]] + then + echo "::error::No python3 interpreter found. Please setup python before running this action. You could use https://github.com/actions/setup-python." 
+ exit 1 + fi + + PYTHON_BIN="$(python -c 'import sys; print(sys.executable)')" + else + PYTHON_BIN="$(python3 -c 'import sys; print(sys.executable)')" + fi + + echo "Python that creates venv: $PYTHON_BIN" + echo "PYTHON_BIN=$PYTHON_BIN" >> "$GITHUB_ENV" + + PYTHON_VERSION="$($PYTHON_BIN -c 'import sys; print(f"{sys.version_info.major}.{sys.version_info.minor}")')" + if [[ "$PYTHON_VERSION" == "3.7" ]] + then + echo "DEPENDENCIES_VERSION=3.7" >> "$GITHUB_ENV" + else + echo "DEPENDENCIES_VERSION=post-3.7" >> "$GITHUB_ENV" + fi + echo "version=$PYTHON_VERSION" >> "$GITHUB_OUTPUT" + + echo '::endgroup::' + shell: bash + + - name: Restore PIP packages cache + uses: actions/cache/restore@v4 + id: cache + continue-on-error: true + with: + path: '~/.cache/pip' + key: step-security-publish-action-${{ runner.os }}-${{ runner.arch }}-pip-${{ steps.python.outputs.version }}-e594996205319a7990b3a4ec677d10a3 + + - name: Create virtualenv + id: venv + continue-on-error: true + run: | + # Create virtualenv + echo '::group::Create virtualenv' + + echo "Python that creates venv: $PYTHON_BIN" + + echo "Creating virtual environment" + if ! "$PYTHON_BIN" -m virtualenv step-security-publish-action-venv && ! "$PYTHON_BIN" -m venv step-security-publish-action-venv + then + echo "Looks like there is neither virtualenv nor venv package installed" + if ! "$PYTHON_BIN" -m pip install virtualenv + then + echo "::error::Installing virtualenv package failed" + exit 1 + fi + + if ! 
"$PYTHON_BIN" -m virtualenv step-security-publish-action-venv + then + echo "::error::Cannot create venv after installing virtualenv package" + exit 1 + fi + fi + + echo "Finding Python interpreter in venv" + PYTHON_VENV="step-security-publish-action-venv/bin/python" + PYTHON_VENV="$("$PYTHON_VENV" -c 'import sys; print(sys.executable)')" + echo "Python in venv: $PYTHON_VENV" + echo "PYTHON_VENV=$PYTHON_VENV" >> "$GITHUB_ENV" + + echo '::endgroup::' + shell: bash + + - name: Install Python dependencies + run: | + # Install Python dependencies + echo '::group::Install Python dependencies' + "$PYTHON_VENV" -m pip install -r "$GITHUB_ACTION_PATH/../python/requirements-$DEPENDENCIES_VERSION.txt" + echo '::endgroup::' + shell: bash + + - name: Publish Test Results + id: test-results + run: | + # Publish Test Results + echo '::group::Publish Test Results' + "$PYTHON_VENV" "$GITHUB_ACTION_PATH/../python/publish_test_results.py" + echo '::endgroup::' + env: + GITHUB_TOKEN: ${{ inputs.github_token }} + GITHUB_TOKEN_ACTOR: ${{ inputs.github_token_actor }} + GITHUB_RETRIES: ${{ inputs.github_retries }} + COMMIT: ${{ inputs.commit }} + CHECK_NAME: ${{ inputs.check_name }} + COMMENT_TITLE: ${{ inputs.comment_title }} + COMMENT_MODE: ${{ inputs.comment_mode }} + FAIL_ON: ${{ inputs.fail_on }} + ACTION_FAIL: ${{ inputs.action_fail }} + ACTION_FAIL_ON_INCONCLUSIVE: ${{ inputs.action_fail_on_inconclusive }} + FILES: ${{ inputs.files }} + JUNIT_FILES: ${{ inputs.junit_files }} + NUNIT_FILES: ${{ inputs.nunit_files }} + XUNIT_FILES: ${{ inputs.xunit_files }} + TRX_FILES: ${{ inputs.trx_files }} + TIME_UNIT: ${{ inputs.time_unit }} + TEST_FILE_PREFIX: ${{ inputs.test_file_prefix }} + REPORT_INDIVIDUAL_RUNS: ${{ inputs.report_individual_runs }} + REPORT_SUITE_LOGS: ${{ inputs.report_suite_logs }} + DEDUPLICATE_CLASSES_BY_FILE_NAME: ${{ inputs.deduplicate_classes_by_file_name }} + LARGE_FILES: ${{ inputs.large_files }} + IGNORE_RUNS: ${{ inputs.ignore_runs }} + 
COMPARE_TO_EARLIER_COMMIT: ${{ inputs.compare_to_earlier_commit }} + PULL_REQUEST_BUILD: ${{ inputs.pull_request_build }} + EVENT_FILE: ${{ inputs.event_file }} + EVENT_NAME: ${{ inputs.event_name }} + TEST_CHANGES_LIMIT: ${{ inputs.test_changes_limit }} + CHECK_RUN_ANNOTATIONS: ${{ inputs.check_run_annotations }} + CHECK_RUN_ANNOTATIONS_BRANCH: ${{ inputs.check_run_annotations_branch }} + SECONDS_BETWEEN_GITHUB_READS: ${{ inputs.seconds_between_github_reads }} + SECONDS_BETWEEN_GITHUB_WRITES: ${{ inputs.seconds_between_github_writes }} + SECONDARY_RATE_LIMIT_WAIT_SECONDS: ${{ inputs.secondary_rate_limit_wait_seconds }} + JSON_FILE: ${{ inputs.json_file }} + JSON_THOUSANDS_SEPARATOR: ${{ inputs.json_thousands_separator }} + JSON_SUITE_DETAILS: ${{ inputs.json_suite_details }} + JSON_TEST_CASE_RESULTS: ${{ inputs.json_test_case_results }} + CHECK_RUN: ${{ inputs.check_run }} + JOB_SUMMARY: ${{ inputs.job_summary }} + SEARCH_PULL_REQUESTS: ${{ inputs.search_pull_requests }} + # not documented + ROOT_LOG_LEVEL: ${{ inputs.root_log_level }} + # not documented + LOG_LEVEL: ${{ inputs.log_level }} + shell: bash + + - name: Save PIP packages cache + uses: actions/cache/save@v4 + if: ( success() || failure() ) && ! steps.cache.outputs.cache-hit + continue-on-error: true + with: + path: '~/.cache/pip' + key: ${{ steps.cache.outputs.cache-primary-key }} + +branding: + icon: 'check-circle' + color: 'green' diff --git a/macos/action.yml b/macos/action.yml new file mode 100644 index 00000000..a2fa0c2e --- /dev/null +++ b/macos/action.yml @@ -0,0 +1,305 @@ +name: 'Publish Test Results' + +description: 'Publishes JUnit, NUnit, XUnit, TRX, JSON test results on GitHub for .NET, Dart, Java, JS, Jest, Mocha, Python, Scala, …' + +inputs: + github_token: + description: 'GitHub API Access Token.' + default: ${{ github.token }} + required: false + github_token_actor: + description: 'The name of the GitHub app that owns the GitHub API Access Token (see github_token). 
Used to identify pull request comments created by this action during earlier runs. Has to be set when `github_token` is set to a GitHub app installation token (other than GitHub actions). Otherwise, existing comments will not be updated, but new comments created. Note: this does not change the bot name of the pull request comments. Defaults to "github-actions".' + default: 'github-actions' + required: false + github_retries: + description: 'Requests to the GitHub API are retried this number of times. The value must be a positive integer or zero.' + default: '10' + required: false + commit: + description: 'Commit SHA to which test results are published. Only needed if the value of GITHUB_SHA does not work for you.' + required: false + check_name: + description: 'Name of the created check run.' + default: 'Test Results' + required: false + comment_title: + description: 'An alternative title for the pull request comment. Defaults to value of check_name input.' + required: false + comment_mode: + description: 'The action posts comments to pull requests that are associated with the commit. Set to "always" - always comment, "changes" - comment when changes w.r.t. the target branch exist, "changes in failures" - when changes in the number of failures and errors exist, "changes in errors" - when changes in the number of (only) errors exist, "failures" - when failures or errors exist, "errors" - when (only) errors exist, "off" - to not create pull request comments.' + default: 'always' + required: false + fail_on: + description: 'The created test result check run has failure state if any test fails or test errors occur. Never fails when set to "nothing", fails only on errors when set to "errors". Default is "test failures".' + default: 'test failures' + required: false + action_fail: + description: 'When set "true", the action itself fails when tests have failed (see option fail_on).' 
+ default: 'false' + required: false + action_fail_on_inconclusive: + description: 'When set "true", the action itself fails when tests are inconclusive (no test results).' + default: 'false' + required: false + files: + description: 'File patterns of test result files. Relative paths are known to work best, while the non-Docker action also works with absolute paths. Supports "*", "**", "?", and "[]" character ranges. Use multiline string for multiple patterns. Patterns starting with "!" exclude the matching files. There have to be at least one pattern starting without a "!".' + required: false + junit_files: + description: 'Deprecated, use "files" option instead.' + required: false + nunit_files: + description: 'Deprecated, use "files" option instead.' + required: false + xunit_files: + description: 'Deprecated, use "files" option instead.' + required: false + trx_files: + description: 'Deprecated, use "files" option instead.' + required: false + time_unit: + description: 'Time values in the test result files have this unit. Supports "seconds" and "milliseconds".' + default: 'seconds' + required: false + test_file_prefix: + description: 'Paths in the test result files should be relative to the git repository for annotations to work best. This prefix is added to (if starting with "+"), or remove from (if starting with "-") test file paths. Examples: "+src/" or "-/opt/actions-runner".' + required: false + report_individual_runs: + description: 'Individual runs of the same test may see different failures. Reports all individual failures when set "true" or the first only otherwise.' + required: false + report_suite_logs: + description: 'In addition to reporting regular test logs, also report test suite logs. These are logs provided on suite level, not individual test level. Set to "info" for normal output, "error" for error output, "any" for both, or "none" for no suite logs at all. Defaults to "none".' 
+ default: 'none' + required: false + deduplicate_classes_by_file_name: + description: 'De-duplicates classes with same name by their file name when set "true", combines test results for those classes otherwise.' + required: false + large_files: + description: 'Support for large files is enabled when set to "true". Defaults to "false", unless ignore_runs is "true".' + required: false + ignore_runs: + description: 'Does not collect test run information from the test result files, which is useful for very large files. This disables any check run annotations.' + default: 'false' + required: false + check_run: + description: 'Set to "true", the results are published as a check run, but it may not be associated with the workflow that ran this action.' + default: 'true' + required: false + job_summary: + description: 'Set to "true", the results are published as part of the job summary page of the workflow run.' + default: 'true' + required: false + compare_to_earlier_commit: + description: 'Test results are compared to results of earlier commits to highlight changes: "false" - disable comparison, "true" - compare across commits.' + default: 'true' + required: false + pull_request_build: + description: 'As part of pull requests, GitHub builds a merge commit, which combines the commit and the target branch. If tests ran on the actual pushed commit, then set this to "commit". Defaults to "merge".' + default: 'merge' + required: false + event_file: + description: 'An alternative event file to use. Useful to replace a "workflow_run" event file with the actual source event file.' + required: false + event_name: + description: 'An alternative event name to use. Useful to replace a "workflow_run" event name with the actual source event name: github.event.workflow_run.event.' + required: false + test_changes_limit: + description: 'Limits the number of removed or skipped tests reported on pull request comments. This report can be disabled with a value of 0. The default is 10.' 
+ required: false + check_run_annotations: + description: 'Adds additional information to the check run. This is a comma-separated list of any of the following values: "all tests" - list all found tests, "skipped tests" - list all skipped tests. Set to "none" to add no extra annotations at all.' + default: 'all tests, skipped tests' + required: false + check_run_annotations_branch: + description: 'Adds check run annotations only on given branches. Comma-separated list of branch names allowed, asterisk "*" matches all branches. Defaults to event.repository.default_branch or "main, master".' + required: false + seconds_between_github_reads: + description: 'Sets the number of seconds the action waits between concurrent read requests to the GitHub API. This throttles the API usage to avoid abuse rate limits: https://docs.github.com/en/rest/overview/resources-in-the-rest-api#abuse-rate-limits.' + default: '0.25' + required: false + seconds_between_github_writes: + description: 'Sets the number of seconds the action waits between concurrent write requests to the GitHub API. This throttles the API usage to avoid abuse rate limits: https://docs.github.com/en/rest/overview/resources-in-the-rest-api#abuse-rate-limits.' + default: '2.0' + required: false + secondary_rate_limit_wait_seconds: + description: 'Sets the number of seconds to wait before retrying secondary rate limit errors. If not set, the default defined in the PyGithub library is used (currently 60 seconds).' + required: false + json_file: + description: 'Results are written to this JSON file.' + required: false + json_thousands_separator: + description: 'Formatted numbers in JSON use this character to separate groups of thousands. Common values are "," or ".". Defaults to punctuation space (\u2008).' + default: ' ' + required: false + json_suite_details: + description: 'Write out all suite details to the JSON file. Setting this to "true" can greatly increase the size of the output. Defaults to "false".' 
+ default: 'false' + required: false + json_test_case_results: + description: 'Write out all individual test case results to the JSON file. Setting this to "true" can greatly increase the size of the output. Defaults to "false".' + default: 'false' + required: false + search_pull_requests: + description: 'Prior to v2.6.0, the action used the "/search/issues" REST API to find pull requests related to a commit. If you need to restore that behaviour, set this to "true". Defaults to "false".' + default: 'false' + required: false + +outputs: + json: + description: "Test results as JSON" + value: ${{ steps.test-results.outputs.json }} + +runs: + using: 'composite' + steps: + - name: Check for Python3 + id: python + run: | + # Check for Python3 + echo '::group::Check for Python3' + + # we check version here just to execute `python3` with an argument + # on Windows, there is a `python3.exe` that is a proxy to trigger installation from app store + # command `which python3` finds that, but `python3 -V` does not return the version on stdout + if ! which python3 || [[ "$(python3 -V)" != *"python 3."* && "$(python3 -V)" != *"Python 3."* ]] + then + if ! which python || [[ "$(python -V)" != *"python 3."* && "$(python -V)" != *"Python 3."* ]] + then + echo "::error::No python3 interpreter found. Please setup python before running this action. You could use https://github.com/actions/setup-python." 
+ exit 1 + fi + + PYTHON_BIN="$(python -c 'import sys; print(sys.executable)')" + else + PYTHON_BIN="$(python3 -c 'import sys; print(sys.executable)')" + fi + + echo "Python that creates venv: $PYTHON_BIN" + echo "PYTHON_BIN=$PYTHON_BIN" >> "$GITHUB_ENV" + + PYTHON_VERSION="$($PYTHON_BIN -c 'import sys; print(f"{sys.version_info.major}.{sys.version_info.minor}")')" + if [[ "$PYTHON_VERSION" == "3.7" ]] + then + echo "DEPENDENCIES_VERSION=3.7" >> "$GITHUB_ENV" + else + echo "DEPENDENCIES_VERSION=post-3.7" >> "$GITHUB_ENV" + fi + echo "version=$PYTHON_VERSION" >> "$GITHUB_OUTPUT" + + echo '::endgroup::' + shell: bash + + - name: Restore PIP packages cache + uses: actions/cache/restore@v4 + id: cache + continue-on-error: true + with: + path: '~/Library/Caches/pip' + key: step-security-publish-action-${{ runner.os }}-${{ runner.arch }}-pip-${{ steps.python.outputs.version }}-e594996205319a7990b3a4ec677d10a3 + + - name: Create virtualenv + id: venv + continue-on-error: true + run: | + # Create virtualenv + echo '::group::Create virtualenv' + + echo "Python that creates venv: $PYTHON_BIN" + + echo "Creating virtual environment" + if ! "$PYTHON_BIN" -m virtualenv step-security-publish-action-venv && ! "$PYTHON_BIN" -m venv step-security-publish-action-venv + then + echo "Looks like there is neither virtualenv nor venv package installed" + if ! "$PYTHON_BIN" -m pip install virtualenv + then + echo "::error::Installing virtualenv package failed" + exit 1 + fi + + if ! 
"$PYTHON_BIN" -m virtualenv step-security-publish-action-venv + then + echo "::error::Cannot create venv after installing virtualenv package" + exit 1 + fi + fi + + echo "Finding Python interpreter in venv" + PYTHON_VENV="step-security-publish-action-venv/bin/python" + PYTHON_VENV="$("$PYTHON_VENV" -c 'import sys; print(sys.executable)')" + echo "Python in venv: $PYTHON_VENV" + echo "PYTHON_VENV=$PYTHON_VENV" >> "$GITHUB_ENV" + + echo '::endgroup::' + shell: bash + + - name: Install Python dependencies + run: | + # Install Python dependencies + echo '::group::Install Python dependencies' + "$PYTHON_VENV" -m pip install -r "$GITHUB_ACTION_PATH/../python/requirements-$DEPENDENCIES_VERSION.txt" + echo '::endgroup::' + shell: bash + + - name: Publish Test Results + id: test-results + run: | + # Publish Test Results + echo '::group::Publish Test Results' + "$PYTHON_VENV" "$GITHUB_ACTION_PATH/../python/publish_test_results.py" + echo '::endgroup::' + env: + GITHUB_TOKEN: ${{ inputs.github_token }} + GITHUB_TOKEN_ACTOR: ${{ inputs.github_token_actor }} + GITHUB_RETRIES: ${{ inputs.github_retries }} + COMMIT: ${{ inputs.commit }} + CHECK_NAME: ${{ inputs.check_name }} + COMMENT_TITLE: ${{ inputs.comment_title }} + COMMENT_MODE: ${{ inputs.comment_mode }} + FAIL_ON: ${{ inputs.fail_on }} + ACTION_FAIL: ${{ inputs.action_fail }} + ACTION_FAIL_ON_INCONCLUSIVE: ${{ inputs.action_fail_on_inconclusive }} + FILES: ${{ inputs.files }} + JUNIT_FILES: ${{ inputs.junit_files }} + NUNIT_FILES: ${{ inputs.nunit_files }} + XUNIT_FILES: ${{ inputs.xunit_files }} + TRX_FILES: ${{ inputs.trx_files }} + TIME_UNIT: ${{ inputs.time_unit }} + TEST_FILE_PREFIX: ${{ inputs.test_file_prefix }} + REPORT_INDIVIDUAL_RUNS: ${{ inputs.report_individual_runs }} + REPORT_SUITE_LOGS: ${{ inputs.report_suite_logs }} + DEDUPLICATE_CLASSES_BY_FILE_NAME: ${{ inputs.deduplicate_classes_by_file_name }} + LARGE_FILES: ${{ inputs.large_files }} + IGNORE_RUNS: ${{ inputs.ignore_runs }} + 
COMPARE_TO_EARLIER_COMMIT: ${{ inputs.compare_to_earlier_commit }} + PULL_REQUEST_BUILD: ${{ inputs.pull_request_build }} + EVENT_FILE: ${{ inputs.event_file }} + EVENT_NAME: ${{ inputs.event_name }} + TEST_CHANGES_LIMIT: ${{ inputs.test_changes_limit }} + CHECK_RUN_ANNOTATIONS: ${{ inputs.check_run_annotations }} + CHECK_RUN_ANNOTATIONS_BRANCH: ${{ inputs.check_run_annotations_branch }} + SECONDS_BETWEEN_GITHUB_READS: ${{ inputs.seconds_between_github_reads }} + SECONDS_BETWEEN_GITHUB_WRITES: ${{ inputs.seconds_between_github_writes }} + SECONDARY_RATE_LIMIT_WAIT_SECONDS: ${{ inputs.secondary_rate_limit_wait_seconds }} + JSON_FILE: ${{ inputs.json_file }} + JSON_THOUSANDS_SEPARATOR: ${{ inputs.json_thousands_separator }} + JSON_SUITE_DETAILS: ${{ inputs.json_suite_details }} + JSON_TEST_CASE_RESULTS: ${{ inputs.json_test_case_results }} + CHECK_RUN: ${{ inputs.check_run }} + JOB_SUMMARY: ${{ inputs.job_summary }} + SEARCH_PULL_REQUESTS: ${{ inputs.search_pull_requests }} + # not documented + ROOT_LOG_LEVEL: ${{ inputs.root_log_level }} + # not documented + LOG_LEVEL: ${{ inputs.log_level }} + shell: bash + + - name: Save PIP packages cache + uses: actions/cache/save@v4 + if: ( success() || failure() ) && ! 
steps.cache.outputs.cache-hit + continue-on-error: true + with: + path: '~/Library/Caches/pip' + key: ${{ steps.cache.outputs.cache-primary-key }} + +branding: + icon: 'check-circle' + color: 'green' diff --git a/misc/action/json-output/action.yml b/misc/action/json-output/action.yml index e12e588f..28163c03 100644 --- a/misc/action/json-output/action.yml +++ b/misc/action/json-output/action.yml @@ -1,4 +1,5 @@ name: 'Assert JSON output' + description: 'A GitHub Action that asserts the publish action''s JSON output' inputs: @@ -58,5 +59,5 @@ runs: fi branding: - icon: 'check-square' + icon: 'check-circle' color: 'green' diff --git a/python/publish/__init__.py b/python/publish/__init__.py index 9bc98839..96975aaf 100644 --- a/python/publish/__init__.py +++ b/python/publish/__init__.py @@ -11,14 +11,14 @@ UnitTestRunDeltaResults, UnitTestRunResultsOrDeltaResults, ParseError # keep the version in sync with action.yml -__version__ = 'v1.0.3' +__version__ = 'v2.17.0' logger = logging.getLogger('publish') digest_prefix = '[test-results]:data:' digest_mime_type = 'application/gzip' digest_encoding = 'base64' digest_header = f'{digest_prefix}{digest_mime_type};{digest_encoding},' -digit_space = '  ' +digit_space = ' ' punctuation_space = ' ' comment_mode_off = 'off' @@ -446,11 +446,11 @@ def get_link_and_tooltip_label_md(label: str, tooltip: str) -> str: all_tests_label_md = 'tests' -passed_tests_label_md = get_link_and_tooltip_label_md(':heavy_check_mark:', 'passed tests') -skipped_tests_label_md = get_link_and_tooltip_label_md(':zzz:', 'skipped / disabled tests') -failed_tests_label_md = get_link_and_tooltip_label_md(':x:', 'failed tests') -test_errors_label_md = get_link_and_tooltip_label_md(':fire:', 'test errors') -duration_label_md = get_link_and_tooltip_label_md(':stopwatch:', 'duration of all tests') +passed_tests_label_md = ':white_check_mark:' +skipped_tests_label_md = ':zzz:' +failed_tests_label_md = ':x:' +test_errors_label_md = ':fire:' +duration_label_md = 
':stopwatch:' def get_short_summary_md(stats: UnitTestRunResultsOrDeltaResults) -> str: @@ -637,7 +637,7 @@ def get_long_summary_with_runs_md(stats: UnitTestRunResultsOrDeltaResults, runs_error=as_stat_number(stats.runs_error, error_digits, error_delta_digits, test_errors_label_md) ) if get_magnitude(stats.runs_error) else '' runs_line = '{runs} {runs_succ} {runs_skip} {runs_fail}{runs_error_part}\n'.format( - runs=as_stat_number(stats.runs, files_digits, files_delta_digits, 'runs '), + runs=as_stat_number(stats.runs, files_digits, files_delta_digits, 'runs '), runs_succ=as_stat_number(stats.runs_succ, success_digits, success_delta_digits, passed_tests_label_md), runs_skip=as_stat_number(stats.runs_skip, skip_digits, skip_delta_digits, skipped_tests_label_md), runs_fail=as_stat_number(stats.runs_fail, fail_digits, fail_delta_digits, failed_tests_label_md), diff --git a/python/publish/publisher.py b/python/publish/publisher.py index ca961279..733bebe4 100644 --- a/python/publish/publisher.py +++ b/python/publish/publisher.py @@ -3,9 +3,9 @@ import logging import os import re -from dataclasses import dataclass -from typing import List, Set, Any, Optional, Tuple, Mapping, Dict, Union, Callable from copy import deepcopy +from dataclasses import dataclass +from typing import List, Any, Optional, Tuple, Mapping, Dict, Union, Callable from github import Github, GithubException, UnknownObjectException from github.CheckRun import CheckRun @@ -24,7 +24,7 @@ from publish import logger from publish.github_action import GithubAction from publish.unittestresults import UnitTestCaseResults, UnitTestRunResults, UnitTestRunDeltaResults, \ - UnitTestRunResultsOrDeltaResults, get_stats_delta, create_unit_test_case_results + UnitTestRunResultsOrDeltaResults, get_stats_delta, get_diff_value @dataclass(frozen=True) @@ -59,6 +59,7 @@ class Settings: check_name: str comment_title: str comment_mode: str + check_run: bool job_summary: bool compare_earlier: bool pull_request_build: str @@ 
-80,13 +81,18 @@ class Settings: class PublishData: title: str summary: str + summary_with_digest: Optional[str] conclusion: str stats: UnitTestRunResults stats_with_delta: Optional[UnitTestRunDeltaResults] + before_stats: Optional[UnitTestRunResults] annotations: List[Annotation] - check_url: str + check_url: Optional[str] cases: Optional[UnitTestCaseResults] + def with_check_url(self, url: str) -> 'PublishData': + return dataclasses.replace(self, check_url=url) + def without_exceptions(self) -> 'PublishData': return dataclasses.replace( self, @@ -99,11 +105,19 @@ def without_exceptions(self) -> 'PublishData': ) def without_suite_details(self) -> 'PublishData': - return dataclasses.replace(self, stats=self.stats.without_suite_details()) + return dataclasses.replace( + self, + stats=self.stats.without_suite_details() if self.stats is not None else None, + stats_with_delta=self.stats_with_delta.without_suite_details() if self.stats_with_delta is not None else None, + before_stats=self.before_stats.without_suite_details() if self.before_stats is not None else None + ) def without_cases(self) -> 'PublishData': return dataclasses.replace(self, cases=None) + def without_summary_with_digest(self) -> 'PublishData': + return dataclasses.replace(self, summary_with_digest=None) + @classmethod def _format_digit(cls, value: Union[int, Mapping[str, int], Any], thousands_separator: str) -> Union[str, Mapping[str, str], Any]: if isinstance(value, int): @@ -120,12 +134,15 @@ def _format(cls, stats: Mapping[str, Any], thousands_separator: str) -> Dict[str def _formatted_stats_and_delta(cls, stats: Optional[Mapping[str, Any]], stats_with_delta: Optional[Mapping[str, Any]], + before_stats: Optional[Mapping[str, Any]], thousands_separator: str) -> Mapping[str, Any]: d = {} if stats is not None: d.update(stats=cls._format(stats, thousands_separator)) if stats_with_delta is not None: d.update(stats_with_delta=cls._format(stats_with_delta, thousands_separator)) + if before_stats is not 
None: + d.update(before_stats=cls._format(before_stats, thousands_separator)) return d def _as_dict(self) -> Dict[str, Any]: @@ -133,7 +150,7 @@ def _as_dict(self) -> Dict[str, Any]: return dataclasses.asdict(self, dict_factory=lambda x: {k: v for (k, v) in x if v is not None}) def to_dict(self, thousands_separator: str, with_suite_details: bool, with_cases: bool) -> Mapping[str, Any]: - data = self.without_exceptions() + data = self.without_exceptions().without_summary_with_digest() if not with_suite_details: data = data.without_suite_details() if not with_cases: @@ -151,29 +168,33 @@ def to_dict(self, thousands_separator: str, with_suite_details: bool, with_cases # provide formatted stats and delta d.update(formatted=self._formatted_stats_and_delta( - d.get('stats'), d.get('stats_with_delta'), thousands_separator + d.get('stats'), d.get('stats_with_delta'), d.get('before_stats'), thousands_separator )) return d def to_reduced_dict(self, thousands_separator: str) -> Mapping[str, Any]: # remove exceptions, suite details and cases - data = self.without_exceptions().without_suite_details().without_cases()._as_dict() + data = self.without_exceptions().without_summary_with_digest().without_suite_details().without_cases()._as_dict() # replace some large fields with their lengths and delete individual test cases if present def reduce(d: Dict[str, Any]) -> Dict[str, Any]: d = deepcopy(d) if d.get('stats', {}).get('errors') is not None: d['stats']['errors'] = len(d['stats']['errors']) - if d.get('stats_with_delta', {}).get('errors') is not None: - d['stats_with_delta']['errors'] = len(d['stats_with_delta']['errors']) + if d.get('before_stats', {}).get('errors') is not None: + d['before_stats']['errors'] = len(d['before_stats']['errors']) + if d.get('stats', {}).get('errors') is not None and \ + d.get('before_stats', {}).get('errors') is not None and \ + d.get('stats_with_delta', {}).get('errors') is not None: + d['stats_with_delta']['errors'] = 
get_diff_value(d['stats']['errors'], d['before_stats']['errors']) if d.get('annotations') is not None: d['annotations'] = len(d['annotations']) return d data = reduce(data) data.update(formatted=self._formatted_stats_and_delta( - data.get('stats'), data.get('stats_with_delta'), thousands_separator + data.get('stats'), data.get('stats_with_delta'), data.get('before_stats'), thousands_separator )) return data @@ -196,31 +217,35 @@ def publish(self, if logger.isEnabledFor(logging.DEBUG): logger.debug(f'Publishing {stats}') - if self._settings.is_fork: - # running on a fork, we cannot publish the check, but we can still read before_check_run - # bump the version if you change the target of this link (if it did not exist already) or change the section - logger.info('This action is running on a pull_request event for a fork repository. ' - 'Pull request comments and check runs cannot be created, so disabling these features. ' - 'To fully run the action on fork repository pull requests, see ' - f'https://github.com/step-security/publish-unit-test-result-action/blob/{__version__}/README.md#support-fork-repositories-and-dependabot-branches') - check_run = None - before_check_run = None - if self._settings.compare_earlier: - before_commit_sha = get_json_path(self._settings.event, 'before') - logger.debug(f'comparing against before={before_commit_sha}') - before_check_run = self.get_check_run(before_commit_sha) - else: - check_run, before_check_run = self.publish_check(stats, cases, conclusion) + # construct publish data (test results) + data = self.get_publish_data(stats, cases, conclusion) + + # publish the check status + if self._settings.check_run: + if self._settings.is_fork: + # running on a fork, we cannot publish the check, but we can still read before_check_run + # bump the version if you change the target of this link (if it did not exist already) or change the section + logger.info('This action is running on a pull_request event for a fork 
repository. ' + 'Pull request comments and check runs cannot be created, so disabling these features. ' + 'To fully run the action on fork repository pull requests, see ' + f'https://github.com/step-security/publish-unit-test-result-action/blob/{__version__}/README.md#support-fork-repositories-and-dependabot-branches') + else: + data = self.publish_check(data) + # create data as json + self.publish_json(data) + + # publish job summary if self._settings.job_summary: - self.publish_job_summary(self._settings.comment_title, stats, check_run, before_check_run) + self.publish_job_summary(self._settings.comment_title, data) + # publish pr comments if not self._settings.is_fork: if self._settings.comment_mode != comment_mode_off: pulls = self.get_pulls(self._settings.commit) if pulls: for pull in pulls: - self.publish_comment(self._settings.comment_title, stats, pull, check_run, cases) + self.publish_comment(self._settings.comment_title, stats, pull, data.check_url, cases) else: logger.info(f'There is no pull request for commit {self._settings.commit}') else: @@ -382,14 +407,13 @@ def get_test_list_from_annotation(annotation: CheckRunAnnotation) -> Optional[Li return None return annotation.raw_details.split('\n') - def publish_check(self, - stats: UnitTestRunResults, - cases: UnitTestCaseResults, - conclusion: str) -> Tuple[CheckRun, Optional[CheckRun]]: + def get_publish_data(self, + stats: UnitTestRunResults, + cases: UnitTestCaseResults, + conclusion: str) -> PublishData: # get stats from earlier commits before_stats = None - before_check_run = None - if self._settings.compare_earlier: + if self._settings.compare_earlier and self._settings.check_run: before_commit_sha = get_json_path(self._settings.event, 'before') logger.debug(f'comparing against before={before_commit_sha}') before_check_run = self.get_check_run(before_commit_sha) @@ -405,16 +429,30 @@ def publish_check(self, title = get_short_summary(stats) summary = 
get_long_summary_md(stats_with_delta) + summary_with_digest = get_long_summary_with_digest_md(stats_with_delta, stats) + return PublishData( + title=title, + summary=summary, + summary_with_digest=summary_with_digest, + conclusion=conclusion, + stats=stats, + stats_with_delta=stats_with_delta if before_stats is not None else None, + before_stats=before_stats, + annotations=all_annotations, + check_url=None, + cases=cases + ) + + def publish_check(self, data: PublishData) -> PublishData: # we can send only 50 annotations at once, so we split them into chunks of 50 check_run = None - summary_with_digest = get_long_summary_with_digest_md(stats_with_delta, stats) - split_annotations = [annotation.to_dict() for annotation in all_annotations] + split_annotations = [annotation.to_dict() for annotation in data.annotations] split_annotations = [split_annotations[x:x+50] for x in range(0, len(split_annotations), 50)] or [[]] for annotations in split_annotations: output = dict( - title=title, - summary=summary_with_digest, + title=data.title, + summary=data.summary_with_digest, annotations=annotations ) @@ -423,7 +461,7 @@ def publish_check(self, check_run = self._repo.create_check_run(name=self._settings.check_name, head_sha=self._settings.commit, status='completed', - conclusion=conclusion, + conclusion=data.conclusion, output=output) logger.info(f'Created check {check_run.html_url}') else: @@ -431,30 +469,13 @@ def publish_check(self, check_run.edit(output=output) logger.debug(f'updated check') - # create full json - data = PublishData( - title=title, - summary=summary, - conclusion=conclusion, - stats=stats, - stats_with_delta=stats_with_delta if before_stats is not None else None, - annotations=all_annotations, - check_url=check_run.html_url, - cases=cases - ) - self.publish_json(data) - - return check_run, before_check_run + return data.with_check_url(check_run.html_url) def publish_json(self, data: PublishData): if self._settings.json_file: try: with 
open(self._settings.json_file, 'wt', encoding='utf-8') as w: - json.dump(data.to_dict( - self._settings.json_thousands_separator, - self._settings.json_suite_details, - self._settings.json_test_case_results - ), w, ensure_ascii=False) + self.write_json(data, w, self._settings) except Exception as e: self._gha.error(f'Failed to write JSON file {self._settings.json_file}: {str(e)}') try: @@ -465,15 +486,18 @@ def publish_json(self, data: PublishData): # provide a reduced version to Github actions self._gha.add_to_output('json', json.dumps(data.to_reduced_dict(self._settings.json_thousands_separator), ensure_ascii=False)) - def publish_job_summary(self, - title: str, - stats: UnitTestRunResults, - check_run: CheckRun, - before_check_run: Optional[CheckRun]): - before_stats = self.get_stats_from_check_run(before_check_run) if before_check_run is not None else None - stats_with_delta = get_stats_delta(stats, before_stats, 'earlier') if before_stats is not None else stats - - details_url = check_run.html_url if check_run else None + @staticmethod + def write_json(data: PublishData, writer, settings: Settings): + json.dump(data.to_dict( + settings.json_thousands_separator, + settings.json_suite_details, + settings.json_test_case_results + ), writer, ensure_ascii=False, indent=2) + + def publish_job_summary(self, title: str, data: PublishData): + title = title + stats_with_delta = data.stats_with_delta if data.stats_with_delta is not None else data.stats + details_url = data.check_url summary = get_long_summary_md(stats_with_delta, details_url) markdown = f'## {title}\n{summary}' self._gha.add_to_job_summary(markdown) @@ -532,11 +556,11 @@ def publish_comment(self, title: str, stats: UnitTestRunResults, pull_request: PullRequest, - check_run: Optional[CheckRun] = None, + details_url: Optional[str] = None, cases: Optional[UnitTestCaseResults] = None): # compare them with earlier stats base_check_run = None - if self._settings.compare_earlier: + if 
self._settings.compare_earlier and self._settings.check_run: base_commit_sha = self.get_base_commit_sha(pull_request) if stats.commit == base_commit_sha: # we do not publish a comment when we compare the commit to itself @@ -565,7 +589,6 @@ def publish_comment(self, logger.info(f'No pull request comment required as comment mode is {self._settings.comment_mode} (comment_mode)') return - details_url = check_run.html_url if check_run else None summary = get_long_summary_with_digest_md(stats_with_delta, stats, details_url, test_changes, self._settings.test_changes_limit) body = f'## {title}\n{summary}' diff --git a/python/publish/unittestresults.py b/python/publish/unittestresults.py index b46070f7..ada06f12 100644 --- a/python/publish/unittestresults.py +++ b/python/publish/unittestresults.py @@ -312,6 +312,8 @@ class UnitTestRunDeltaResults: suites: Numeric duration: Numeric + suite_details: Optional[List[UnitTestSuite]] + tests: Numeric tests_succ: Numeric tests_skip: Numeric @@ -378,6 +380,9 @@ def d(value: Numeric) -> int: def without_exceptions(self) -> 'UnitTestRunDeltaResults': return dataclasses.replace(self, errors=[error.without_exception() for error in self.errors]) + def without_suite_details(self) -> 'UnitTestRunDeltaResults': + return dataclasses.replace(self, suite_details=None) + UnitTestRunResultsOrDeltaResults = Union[UnitTestRunResults, UnitTestRunDeltaResults] @@ -498,6 +503,8 @@ def get_stats_delta(stats: UnitTestRunResults, suites=get_diff_value(stats.suites, reference_stats.suites), duration=get_diff_value(stats.duration, reference_stats.duration, 'duration'), + suite_details=stats.suite_details, + tests=get_diff_value(stats.tests, reference_stats.tests), tests_succ=get_diff_value(stats.tests_succ, reference_stats.tests_succ), tests_skip=get_diff_value(stats.tests_skip, reference_stats.tests_skip), diff --git a/python/publish_test_results.py b/python/publish_test_results.py index 313215ff..df23f26d 100644 --- a/python/publish_test_results.py +++ 
b/python/publish_test_results.py @@ -190,6 +190,7 @@ def parse_files(settings: Settings, gha: GithubAction) -> ParsedUnitTestResultsW elems = [] # parse files, log the progress + # https://github.com/step-security/publish-unit-test-result-action/issues/304 with progress_logger(items=len(files + junit_files + nunit_files + xunit_files + trx_files), interval_seconds=10, progress_template='Read {progress} files in {time}', @@ -226,7 +227,7 @@ def log_parse_errors(errors: List[ParseError], gha: GithubAction): def action_fail_required(conclusion: str, action_fail: bool, action_fail_on_inconclusive: bool) -> bool: return action_fail and conclusion == 'failure' or \ - action_fail_on_inconclusive and conclusion == 'inconclusive' + action_fail_on_inconclusive and conclusion == 'neutral' def validate_subscription(): @@ -281,7 +282,8 @@ def main(settings: Settings, gha: GithubAction) -> None: Publisher(settings, gh, gha).publish(stats, results.case_results, conclusion) if action_fail_required(conclusion, settings.action_fail, settings.action_fail_on_inconclusive): - gha.error(f'This action finished successfully, but test results have status {conclusion}.') + status = f"{conclusion} / inconclusive" if conclusion == "neutral" else conclusion + gha.error(f'This action finished successfully, but test results have status {status}.') sys.exit(1) @@ -408,6 +410,7 @@ def get_settings(options: dict, gha: GithubAction) -> Settings: event = json.load(f) repo = get_var('GITHUB_REPOSITORY', options) + check_run = get_bool_var('CHECK_RUN', options, default=True) job_summary = get_bool_var('JOB_SUMMARY', options, default=True) comment_mode = get_var('COMMENT_MODE', options) or comment_mode_always @@ -486,6 +489,7 @@ def get_settings(options: dict, gha: GithubAction) -> Settings: check_name=check_name, comment_title=get_var('COMMENT_TITLE', options) or check_name, comment_mode=comment_mode, + check_run=check_run, job_summary=job_summary, 
compare_earlier=get_bool_var('COMPARE_TO_EARLIER_COMMIT', options, default=True), pull_request_build=get_var('PULL_REQUEST_BUILD', options) or 'merge', diff --git a/python/requirements-3.10.txt b/python/requirements-3.10.txt new file mode 100644 index 00000000..118eb45a --- /dev/null +++ b/python/requirements-3.10.txt @@ -0,0 +1,18 @@ +humanize==4.9.0 +junitparser==3.1.2 +lxml==5.1.0 +psutil==5.9.8 +PyGithub==2.2.0 + Deprecated==1.2.14 + wrapt==1.16.0 + PyJWT==2.8.0 + PyNaCl==1.5.0 + cffi==1.16.0 + pycparser==2.22 + requests==2.32.3 + certifi==2024.7.4 + charset-normalizer==3.3.2 + idna==3.7 + urllib3==2.2.2 + typing_extensions==4.12.2 + urllib3==2.2.2 diff --git a/python/requirements-3.11.txt b/python/requirements-3.11.txt new file mode 100644 index 00000000..118eb45a --- /dev/null +++ b/python/requirements-3.11.txt @@ -0,0 +1,18 @@ +humanize==4.9.0 +junitparser==3.1.2 +lxml==5.1.0 +psutil==5.9.8 +PyGithub==2.2.0 + Deprecated==1.2.14 + wrapt==1.16.0 + PyJWT==2.8.0 + PyNaCl==1.5.0 + cffi==1.16.0 + pycparser==2.22 + requests==2.32.3 + certifi==2024.7.4 + charset-normalizer==3.3.2 + idna==3.7 + urllib3==2.2.2 + typing_extensions==4.12.2 + urllib3==2.2.2 diff --git a/python/requirements-3.12.txt b/python/requirements-3.12.txt new file mode 100644 index 00000000..118eb45a --- /dev/null +++ b/python/requirements-3.12.txt @@ -0,0 +1,18 @@ +humanize==4.9.0 +junitparser==3.1.2 +lxml==5.1.0 +psutil==5.9.8 +PyGithub==2.2.0 + Deprecated==1.2.14 + wrapt==1.16.0 + PyJWT==2.8.0 + PyNaCl==1.5.0 + cffi==1.16.0 + pycparser==2.22 + requests==2.32.3 + certifi==2024.7.4 + charset-normalizer==3.3.2 + idna==3.7 + urllib3==2.2.2 + typing_extensions==4.12.2 + urllib3==2.2.2 diff --git a/python/requirements-3.7.txt b/python/requirements-3.7.txt new file mode 100644 index 00000000..5e51fed6 --- /dev/null +++ b/python/requirements-3.7.txt @@ -0,0 +1,22 @@ +humanize==4.6.0 + importlib-metadata==6.7.0 + typing_extensions==4.7.1 + zipp==3.15.0 +junitparser==3.1.2 +lxml==5.1.0 +psutil==5.9.8 
+PyGithub==2.2.0 + Deprecated==1.2.14 + wrapt==1.16.0 + PyJWT==2.8.0 + typing_extensions==4.7.1 + PyNaCl==1.5.0 + cffi==1.15.1 + pycparser==2.21 + requests==2.31.0 + certifi==2024.7.4 + charset-normalizer==3.3.2 + idna==3.7 + urllib3==2.0.7 + typing_extensions==4.7.1 + urllib3==2.0.7 diff --git a/python/requirements-3.8.txt b/python/requirements-3.8.txt new file mode 100644 index 00000000..118eb45a --- /dev/null +++ b/python/requirements-3.8.txt @@ -0,0 +1,18 @@ +humanize==4.9.0 +junitparser==3.1.2 +lxml==5.1.0 +psutil==5.9.8 +PyGithub==2.2.0 + Deprecated==1.2.14 + wrapt==1.16.0 + PyJWT==2.8.0 + PyNaCl==1.5.0 + cffi==1.16.0 + pycparser==2.22 + requests==2.32.3 + certifi==2024.7.4 + charset-normalizer==3.3.2 + idna==3.7 + urllib3==2.2.2 + typing_extensions==4.12.2 + urllib3==2.2.2 diff --git a/python/requirements-3.9.txt b/python/requirements-3.9.txt new file mode 100644 index 00000000..118eb45a --- /dev/null +++ b/python/requirements-3.9.txt @@ -0,0 +1,18 @@ +humanize==4.9.0 +junitparser==3.1.2 +lxml==5.1.0 +psutil==5.9.8 +PyGithub==2.2.0 + Deprecated==1.2.14 + wrapt==1.16.0 + PyJWT==2.8.0 + PyNaCl==1.5.0 + cffi==1.16.0 + pycparser==2.22 + requests==2.32.3 + certifi==2024.7.4 + charset-normalizer==3.3.2 + idna==3.7 + urllib3==2.2.2 + typing_extensions==4.12.2 + urllib3==2.2.2 diff --git a/python/requirements-direct.txt b/python/requirements-direct.txt deleted file mode 100644 index 2b17f6ce..00000000 --- a/python/requirements-direct.txt +++ /dev/null @@ -1,5 +0,0 @@ -humanize==3.14.0 -junitparser==3.1.0 -lxml==4.9.3 -psutil==5.9.5 -PyGithub==2.1.1 diff --git a/python/requirements-post-3.7.txt b/python/requirements-post-3.7.txt new file mode 100644 index 00000000..118eb45a --- /dev/null +++ b/python/requirements-post-3.7.txt @@ -0,0 +1,18 @@ +humanize==4.9.0 +junitparser==3.1.2 +lxml==5.1.0 +psutil==5.9.8 +PyGithub==2.2.0 + Deprecated==1.2.14 + wrapt==1.16.0 + PyJWT==2.8.0 + PyNaCl==1.5.0 + cffi==1.16.0 + pycparser==2.22 + requests==2.32.3 + certifi==2024.7.4 + 
charset-normalizer==3.3.2 + idna==3.7 + urllib3==2.2.2 + typing_extensions==4.12.2 + urllib3==2.2.2 diff --git a/python/requirements.txt b/python/requirements.txt index 0904d348..da2e0001 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -1,21 +1,6 @@ -humanize==3.14.0 -junitparser==3.1.0 - future==0.18.3 -lxml==4.9.3 -psutil==5.9.5 -PyGithub==2.1.1 - Deprecated==1.2.14 - wrapt==1.15.0 - PyJWT==2.8.0 - PyNaCl==1.5.0 - cffi==1.15.1 - pycparser==2.21 - python-dateutil==2.8.2 - six==1.16.0 - requests==2.31.0 - certifi==2023.7.22 - charset-normalizer==3.3.0 - idna==3.4 - urllib3==2.0.6 - typing_extensions==4.7.1 - urllib3==2.0.6 +humanize==4.6.0; python_version <= '3.7' +humanize==4.9.0; python_version > '3.7' +junitparser==3.1.2 +lxml==5.1.0 +psutil==5.9.8 +PyGithub==2.2.0 diff --git a/python/test/constraints.txt b/python/test/constraints.txt index f42c5f7c..e69de29b 100644 --- a/python/test/constraints.txt +++ b/python/test/constraints.txt @@ -1,2 +0,0 @@ -# test_github.py fails with newer version -Werkzeug<2.1.0 \ No newline at end of file diff --git a/python/test/files/dart/json/tests.annotations b/python/test/files/dart/json/tests.annotations index aeac63b8..bcb21cb3 100644 --- a/python/test/files/dart/json/tests.annotations +++ b/python/test/files/dart/json/tests.annotations @@ -7,21 +7,10 @@ 'output': { 'title': '2 errors, 1 fail, 1 skipped, 16 pass in 0s', 'summary': - '20 tests\u2002\u2003\u200316 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '\u205f\u20044 suites\u2003\u2003\u205f\u20041 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / 
disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20041 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\u2003\u20032 ' - '[:fire:](https://github.com/step-security/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '20 tests\u2002\u2003\u200316 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n\u20074 suites\u2003\u2003\u20071 :zzz:\n\u20071 files\u2004\u2002' + '\u2003\u2003\u20071 :x:\u2003\u20032 :fire:\n\nResults for commit ' + 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMTQqAIBBGr' 'yKuW1REiy4TYkVDpTHqKrp74w+au3nvY97DNzhXwyfWNYwbBzbAQLA4FBa0ImwJabB+6j' 'PMxknpP8diDrhTK4pNwFmJFVGjz5BBp3LR31UwitIL/MsF/tekvi6wBOliZhf8/QAMgVR' diff --git a/python/test/files/dart/json/tests.results.json b/python/test/files/dart/json/tests.results.json new file mode 100644 index 00000000..aea47e5e --- /dev/null +++ b/python/test/files/dart/json/tests.results.json @@ -0,0 +1,89 @@ +{ + "title": "2 errors, 1 fail, 1 skipped, 16 pass in 0s", + "summary": "20 tests   16 :white_check_mark:  0s :stopwatch:\n 4 suites   1 :zzz:\n 1 files     1 :x:  2 :fire:\n\nResults for commit commit s.\n", + "conclusion": "failure", + "stats": { + "files": 1, + "errors": [], + "suites": 4, + "duration": 0, + "tests": 20, + "tests_succ": 16, + "tests_skip": 1, + "tests_fail": 1, + "tests_error": 2, + "runs": 20, + "runs_succ": 16, + "runs_skip": 1, + "runs_fail": 1, + "runs_error": 2, + "commit": "commit sha" + }, + "annotations": [ + { + "path": "file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/cli_runner_test.dart", + "start_line": 21, + "end_line": 21, + "annotation_level": "warning", + "message": "json/tests.json [took 0s]", + "title": "Cli runner should have correct invocation failed", + "raw_details": "Expected: 'metrics 
[arguments] nope'\n Actual: 'metrics [arguments] '\n Which: is different. Both strings start the same, but the actual value is missing the following trailing characters: nope\n\npackage:test_api expect\ntest/src/cli/cli_runner_test.dart 22:7 main.." + }, + { + "path": "file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/utils/detect_sdk_path_test.dart", + "start_line": 16, + "end_line": 16, + "annotation_level": "failure", + "message": "json/tests.json [took 0s]", + "title": "detectSdkPath should return `null` if running inside VM with error", + "raw_details": "Exception: exception\ntest/src/cli/utils/detect_sdk_path_test.dart 21:7 main.." + }, + { + "path": "file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/utils/detect_sdk_path_test.dart", + "start_line": 46, + "end_line": 46, + "annotation_level": "failure", + "message": "json/tests.json [took 0s]", + "title": "detectSdkPath should return null if sdk path is not found inside environment PATH variable with error", + "raw_details": "Instance of 'Error'\ntest/src/cli/utils/detect_sdk_path_test.dart 67:9 main.." 
+ }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There is 1 skipped test, see \"Raw output\" for the name of the skipped test.", + "title": "1 skipped test found", + "raw_details": "Cli runner should have correct description" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 20 tests, see \"Raw output\" for the full list of tests.", + "title": "20 tests found", + "raw_details": "AnalysisOptions readIterableOfString returns iterables with data or not\nAnalysisOptions readMap returns map with data or not\nAnalysisOptions readMapOfMap returns map with data or not\nAnalysisOptions returns correct \"folderPath\" on posix platforms\nCheckUnnecessaryNullableCommand should have correct description\nCheckUnnecessaryNullableCommand should have correct invocation\nCheckUnnecessaryNullableCommand should have correct name\nCheckUnnecessaryNullableCommand should have correct usage\nCli runner run with version argument\nCli runner should have correct description\nCli runner should have correct invocation\nanalysisOptionsFromFile constructs AnalysisOptions from extends config\nanalysisOptionsFromFile constructs AnalysisOptions from invalid file\nanalysisOptionsFromFile constructs AnalysisOptions from null\nanalysisOptionsFromFile constructs AnalysisOptions from valid file with single import\nanalysisOptionsFromFile constructs AnalysisOptions from yaml file\ndetectSdkPath should find sdk path inside environment PATH variable\ndetectSdkPath should return `null` for non-Windows platforms\ndetectSdkPath should return `null` if running inside VM\ndetectSdkPath should return null if sdk path is not found inside environment PATH variable" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "4", + "duration": "0", + "tests": "20", + "tests_succ": "16", + "tests_skip": "1", + "tests_fail": "1", + 
"tests_error": "2", + "runs": "20", + "runs_succ": "16", + "runs_skip": "1", + "runs_fail": "1", + "runs_error": "2", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/bazel/suite-logs.annotations b/python/test/files/junit-xml/bazel/suite-logs.annotations index f0365bb0..61324679 100644 --- a/python/test/files/junit-xml/bazel/suite-logs.annotations +++ b/python/test/files/junit-xml/bazel/suite-logs.annotations @@ -7,21 +7,9 @@ 'output': { 'title': '1 errors in 0s', 'summary': - '1 tests\u2002\u2003\u20030 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '1 suites\u2003\u20030 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\u2003\u20031 ' - '[:fire:](https://github.com/step-security/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '1 tests\u2002\u2003\u20030 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\u2003\u20031 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIBAEv' '0KoLbT1M4QgxIsC5g4q498liHp0OzvJnNLBbknOYhqEpAzpgyWjThBDwbFgEelVdSvKxn' 'CpaIOjO5yGvTssYsQWwRyITZ57+K9VZrHKvGWi95AKtCVo1fK6AX55nzvdAAAA\n', diff --git a/python/test/files/junit-xml/bazel/suite-logs.results.json 
b/python/test/files/junit-xml/bazel/suite-logs.results.json new file mode 100644 index 00000000..51a8871c --- /dev/null +++ b/python/test/files/junit-xml/bazel/suite-logs.results.json @@ -0,0 +1,80 @@ +{ + "title": "1 errors in 0s", + "summary": "1 tests   0 :white_check_mark:  0s :stopwatch:\n1 suites  0 :zzz:\n1 files    0 :x:  1 :fire:\n\nResults for commit commit s.\n", + "conclusion": "failure", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 0, + "tests": 1, + "tests_succ": 0, + "tests_skip": 0, + "tests_fail": 0, + "tests_error": 1, + "runs": 1, + "runs_succ": 0, + "runs_skip": 0, + "runs_fail": 0, + "runs_error": 1, + "commit": "commit sha" + }, + "annotations": [ + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "failure", + "message": "bazel/suite-logs.xml [took 0s]", + "title": "bazel/failing_absl_test with error", + "raw_details": "exited with error code 1" + }, + { + "path": "bazel/failing_absl_test", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "Test suite bazel/failing_absl_test has the following stdout output (see Raw output).", + "title": "Logging on stdout of test suite bazel/failing_absl_test", + "raw_details": "Generated test.log (if the file is not UTF-8, then this may be unreadable):\nexec ${PAGER:-/usr/bin/less} \"$0\" || exit 1\nExecuting tests from //bazel:failing_absl_test\n-----------------------------------------------------------------------------\nTraceback (most recent call last):\n File \"\", line 3, in \n import non_existent_package\nModuleNotFoundError: No module named 'non_existent_package'" + }, + { + "path": "bazel/failing_absl_test", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "Test suite bazel/failing_absl_test has the following stderr output (see Raw output).", + "title": "Logging on stderr of test suite bazel/failing_absl_test", + "raw_details": "Generated test.err" + }, + { + "path": ".github", + 
"start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There is 1 test, see \"Raw output\" for the name of the test.", + "title": "1 test found", + "raw_details": "bazel/failing_absl_test" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "0", + "tests": "1", + "tests_succ": "0", + "tests_skip": "0", + "tests_fail": "0", + "tests_error": "1", + "runs": "1", + "runs_succ": "0", + "runs_skip": "0", + "runs_fail": "0", + "runs_error": "1", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/jest/jest-junit.annotations b/python/test/files/junit-xml/jest/jest-junit.annotations index c521c6da..e032cabf 100644 --- a/python/test/files/junit-xml/jest/jest-junit.annotations +++ b/python/test/files/junit-xml/jest/jest-junit.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 2 tests pass in 0s', 'summary': - '2 tests\u2002\u2003\u20032 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '1 suites\u2003\u20030 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '2 tests\u2002\u2003\u20032 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' 
'[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLdTSyxCCEDfyMQtUxrsLCgrdvJnJO6kCLT1dyDQQ6iOED9aIPICzCceEaQh5mmtmPg' 'rRFzsc7ZspDrorJKLD0mC01Zdjq3v5tz3cyB5uXcIZAyFBScRvnF43yWbLod0AAAA=\n', diff --git a/python/test/files/junit-xml/jest/jest-junit.results.json b/python/test/files/junit-xml/jest/jest-junit.results.json new file mode 100644 index 00000000..6fbc1003 --- /dev/null +++ b/python/test/files/junit-xml/jest/jest-junit.results.json @@ -0,0 +1,53 @@ +{ + "title": "All 2 tests pass in 0s", + "summary": "2 tests   2 :white_check_mark:  0s :stopwatch:\n1 suites  0 :zzz:\n1 files    0 :x:\n\nResults for commit commit s.\n", + "conclusion": "success", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 0, + "tests": 2, + "tests_succ": 2, + "tests_skip": 0, + "tests_fail": 0, + "tests_error": 0, + "runs": 2, + "runs_succ": 2, + "runs_skip": 0, + "runs_fail": 0, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 2 tests, see \"Raw output\" for the full list of tests.", + "title": "2 tests found", + "raw_details": "Load widget via link\nMount iframe" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "0", + "tests": "2", + "tests_succ": "2", + "tests_skip": "0", + "tests_fail": "0", + "tests_error": "0", + "runs": "2", + "runs_succ": "2", + "runs_skip": "0", + "runs_fail": "0", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/junit.multiresult.annotations b/python/test/files/junit-xml/junit.multiresult.annotations index 7350421d..900001bf 100644 --- a/python/test/files/junit-xml/junit.multiresult.annotations +++ b/python/test/files/junit-xml/junit.multiresult.annotations @@ -7,32 +7,10 @@ 'output': { 'title': '1 errors, 1 fail, 1 skipped, 1 
pass in 1s', 'summary': - '1 files\u2004\u20031 suites\u2004\u2003\u20021s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '4 tests\u20031 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '1 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '1 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\u20031 ' - '[:fire:](https://github.com/step-security/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "test errors")\n4 runs\u2006\u2003' - '-2 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '3 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '2 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\u20031 ' - '[:fire:](https://github.com/step-security/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '1 files\u2004\u20031 suites\u2004\u2003\u20021s :stopwatch:\n4 tests\u2003' + '1 :white_check_mark:\u20031 :zzz:\u20031 :x:\u20031 :fire:\n4 runs\u200a\u2003' + '-2 :white_check_mark:\u20033 :zzz:\u20032 :x:\u20031 :fire:\n\n' + 'Results for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' 
'0KotfBTeRlCEONGPmYXKuPdlQhEujdvkrn4BkYTX9jQMU4RQoU1ogzgXcZXhKTmsgVFpf' '5S0AFnc2wSTHNoRI/5wehKL82S68d6fLmpcK5V/48pby2EF/JitEt+P6y+BE/eAAAA\n', diff --git a/python/test/files/junit-xml/junit.multiresult.results.json b/python/test/files/junit-xml/junit.multiresult.results.json new file mode 100644 index 00000000..64a295d0 --- /dev/null +++ b/python/test/files/junit-xml/junit.multiresult.results.json @@ -0,0 +1,80 @@ +{ + "title": "1 errors, 1 fail, 1 skipped, 1 pass in 1s", + "summary": "1 files  1 suites   1s :stopwatch:\n4 tests 1 :white_check_mark: 1 :zzz: 1 :x: 1 :fire:\n4 runs  -2 :white_check_mark: 3 :zzz: 2 :x: 1 :fire:\n\nResults for commit commit s.\n", + "conclusion": "failure", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 1, + "tests": 4, + "tests_succ": 1, + "tests_skip": 1, + "tests_fail": 1, + "tests_error": 1, + "runs": 4, + "runs_succ": -2, + "runs_skip": 3, + "runs_fail": 2, + "runs_error": 1, + "commit": "commit sha" + }, + "annotations": [ + { + "path": "test class", + "start_line": 0, + "end_line": 0, + "annotation_level": "failure", + "message": "junit.multiresult.xml [took 0s]", + "title": "test that errors (test class) with error", + "raw_details": "test teardown failure\nstdout" + }, + { + "path": "test class", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "junit.multiresult.xml [took 0s]", + "title": "test that fails (test class) failed", + "raw_details": "test failure\nAssertion failed" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There is 1 skipped test, see \"Raw output\" for the name of the skipped test.", + "title": "1 skipped test found", + "raw_details": "test class ‑ test that is skipped" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 4 tests, see \"Raw output\" for the full list of tests.", + "title": "4 tests found", + 
"raw_details": "test class ‑ test that errors\ntest class ‑ test that fails\ntest class ‑ test that is skipped\ntest class ‑ test that succeeds" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "1", + "tests": "4", + "tests_succ": "1", + "tests_skip": "1", + "tests_fail": "1", + "tests_error": "1", + "runs": "4", + "runs_succ": "-2", + "runs_skip": "3", + "runs_fail": "2", + "runs_error": "1", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/minimal-attributes.annotations b/python/test/files/junit-xml/minimal-attributes.annotations index c1920ccf..22e1f1f9 100644 --- a/python/test/files/junit-xml/minimal-attributes.annotations +++ b/python/test/files/junit-xml/minimal-attributes.annotations @@ -7,21 +7,9 @@ 'output': { 'title': '1 errors, 1 fail, 1 skipped, 1 pass in 0s', 'summary': - '4 tests\u2002\u2003\u20031 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '1 suites\u2003\u20031 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\u2003\u20031 ' - '[:fire:](https://github.com/step-security/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '4 tests\u2002\u2003\u20031 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20031 
:zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\u2003\u20031 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLTSx8jKGIMSNfMwClfHuAoJC92Z2MxeVoISjC5kGQl0A/8EWkHmwJuIYMR58Os11ry' '5wXn6LOODshGSgOiEQLRaDwdRemm3u5b+WuYllblvcag0+QlnE7YzeD8XajRvdAAAA\n', diff --git a/python/test/files/junit-xml/minimal-attributes.results.json b/python/test/files/junit-xml/minimal-attributes.results.json new file mode 100644 index 00000000..eaf13ac5 --- /dev/null +++ b/python/test/files/junit-xml/minimal-attributes.results.json @@ -0,0 +1,78 @@ +{ + "title": "1 errors, 1 fail, 1 skipped, 1 pass in 0s", + "summary": "4 tests   1 :white_check_mark:  0s :stopwatch:\n1 suites  1 :zzz:\n1 files    1 :x:  1 :fire:\n\nResults for commit commit s.\n", + "conclusion": "failure", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 0, + "tests": 4, + "tests_succ": 1, + "tests_skip": 1, + "tests_fail": 1, + "tests_error": 1, + "runs": 4, + "runs_succ": 1, + "runs_skip": 1, + "runs_fail": 1, + "runs_error": 1, + "commit": "commit sha" + }, + "annotations": [ + { + "path": "ClassName", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "minimal-attributes.xml", + "title": "failed_test (ClassName) failed" + }, + { + "path": "ClassName", + "start_line": 0, + "end_line": 0, + "annotation_level": "failure", + "message": "minimal-attributes.xml", + "title": "error_test (ClassName) with error" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There is 1 skipped test, see \"Raw output\" for the name of the skipped test.", + "title": "1 skipped test found", + "raw_details": "ClassName ‑ skipped_test" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 4 tests, see \"Raw output\" for the full list of tests.", + "title": "4 tests found", + "raw_details": 
"ClassName ‑ error_test\nClassName ‑ failed_test\nClassName ‑ skipped_test\nClassName ‑ test_name" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "0", + "tests": "4", + "tests_succ": "1", + "tests_skip": "1", + "tests_fail": "1", + "tests_error": "1", + "runs": "4", + "runs_succ": "1", + "runs_skip": "1", + "runs_fail": "1", + "runs_error": "1", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/mocha/latex-utensils.annotations b/python/test/files/junit-xml/mocha/latex-utensils.annotations index fe84823c..fe8ae8dc 100644 --- a/python/test/files/junit-xml/mocha/latex-utensils.annotations +++ b/python/test/files/junit-xml/mocha/latex-utensils.annotations @@ -7,29 +7,10 @@ 'output': { 'title': 'All 101 tests pass in 0s', 'summary': - '\u205f\u2004\u205f\u20041 files\u2004\u2003\u205f\u2004\u205f\u20041 ' - 'suites\u2004\u2003\u20020s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '101 tests\u2003101 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '0 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n109 runs\u2006\u2003' - '109 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 
'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '0 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '\u2007\u20071 files\u2004\u2003\u2007\u20071 suites\u2004\u2003\u2002' + '0s :stopwatch:\n101 tests\u2003101 :white_check_mark:\u20030 :zzz:\u2003' + '0 :x:\n109 runs\u200a\u2003109 :white_check_mark:\u20030 :zzz:\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MMQ6AIBAEv' '0KoLaDUzxCCEC8imAMq498liIJ2N7O5OagBqwOdCB8IDQniC3NCGcG7jCxjHmKZGH9IhK' 'TUX62w9x/CSLAfoRE9VoPJ3c2xQks204qFu2Dhvqf8tkHMUC8SFknPC30yEpLlAAAA\n', diff --git a/python/test/files/junit-xml/mocha/latex-utensils.results.json b/python/test/files/junit-xml/mocha/latex-utensils.results.json new file mode 100644 index 00000000..1c406f7f --- /dev/null +++ b/python/test/files/junit-xml/mocha/latex-utensils.results.json @@ -0,0 +1,53 @@ +{ + "title": "All 101 tests pass in 0s", + "summary": "  1 files    1 suites   0s :stopwatch:\n101 tests 101 :white_check_mark: 0 :zzz: 0 :x:\n109 runs  109 :white_check_mark: 0 :zzz: 0 :x:\n\nResults for commit commit s.\n", + "conclusion": "success", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 0, + "tests": 101, + "tests_succ": 101, + "tests_skip": 0, + "tests_fail": 0, + "tests_error": 0, + "runs": 109, + "runs_succ": 109, + "runs_skip": 0, + "runs_fail": 0, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 101 tests, see \"Raw output\" for the full list of tests.", + "title": "101 tests found", + "raw_details": "bibtexParser ‑ parse @comment @article @comment\nbibtexParser ‑ parse a simple bib file\nbibtexParser ‑ parse an entry with only key\nbibtexParser ‑ parse bib with 
abbreviation\nbibtexParser ‑ parse bib with comments\nbibtexParser ‑ parse entry with command\nbibtexParser ‑ parse entry with concat\nbibtexParser ‑ parse entry with empty citeky\nbibtexParser ‑ parse entry with quotes\nbibtexParser ‑ parse fields ending ,\nbibtexParser ‑ parse only @comment\nbibtexParser ‑ should not throw SyntaxError\nbibtexParser ‑ should throw SyntaxError\nlatexLogParser ‑ parse LaTeX log files\nlatexLogParser ‑ parse LaTeX log files generated with -halt-on-error\nlatexParser matchers findAll ‑ test latexParser.findAll\nlatexParser matchers findAllSeqences ‑ test latexParser.findAllSeqences\nlatexParser matchers latexParser findNodeAt ‑ test latexParser.findNodeAt\nlatexParser matchers latexParser findNodeAt ‑ test latexParser.findNodeAt with line and column\nlatexParser matchers latexParser findNodeAt ‑ test latexParser.findNodeAt with line and column for multiple lines\nlatexParser matchers pattern ‑ test latexParser.pattern\nlatexParser matchers pattern ‑ test latexParser.pattern.match\nlatexParser matchers type ‑ test that properties having a Node-related-type value are only content, args, and arg.\nlatexParser matchers type ‑ test the types of content, arg, and args.\nlatexParser other ‑ test type guard\nlatexParser other ‑ test type guard with assingment and never type\nlatexParser parse 2 ‑ parse Sch\\\"onbrunner Schlo\\ss{} Stra\\ss e\nlatexParser parse 2 ‑ parse \\\"\\i\nlatexParser parse 2 ‑ parse a\\\\b c\\newline\nlatexParser parse 2 ‑ parse space + \\begin{center}\nlatexParser parse 2 ‑ parse x {a} { b }d\nlatexParser parse 2 ‑ parse { a }d\nlatexParser parse ‑ parse $ $, including only spaces\nlatexParser parse ‑ parse $ a ^ b $\nlatexParser parse ‑ parse $$ $$\nlatexParser parse ‑ parse $1$\nlatexParser parse ‑ parse $\\left(1\\right]$\nlatexParser parse ‑ parse $\\left.1\\right]$\nlatexParser parse ‑ parse $a^b$\nlatexParser parse ‑ parse $a^b$ with {enableMathCharacterLocation: true}\nlatexParser parse ‑ parse \\( 
\\)\nlatexParser parse ‑ parse \\[ \\]\nlatexParser parse ‑ parse \\begin{align} \\begin{alignedat}\nlatexParser parse ‑ parse \\begin{align} \\begin{aligned}\nlatexParser parse ‑ parse \\begin{align} \\end{align}\nlatexParser parse ‑ parse \\begin{align}...\nlatexParser parse ‑ parse \\begin{center} \\begin{itemize}\nlatexParser parse ‑ parse \\begin{center}...\nlatexParser parse ‑ parse \\begin{center}\\endcommand\nlatexParser parse ‑ parse \\begin{lstlisting}...\nlatexParser parse ‑ parse \\begin{minted}...\nlatexParser parse ‑ parse \\begin{verbatim*}...\nlatexParser parse ‑ parse \\begin{verbatim}...\nlatexParser parse ‑ parse \\begin{verbatim}... 02\nlatexParser parse ‑ parse \\def\\abc [#1]#2 {#2#1abc}\nlatexParser parse ‑ parse \\def\\abc{abc}\nlatexParser parse ‑ parse \\href\nlatexParser parse ‑ parse \\label{a_b}\nlatexParser parse ‑ parse \\linebreakMyCommand\nlatexParser parse ‑ parse \\newlineMyCommand\nlatexParser parse ‑ parse \\node[label={abc}, efg]\nlatexParser parse ‑ parse \\par\\par\nlatexParser parse ‑ parse \\part\nlatexParser parse ‑ parse \\url\nlatexParser parse ‑ parse \\verb*|1|\nlatexParser parse ‑ parse \\verbatimfont{\\small}\nlatexParser parse ‑ parse \\verb|1|\nlatexParser parse ‑ parse a command whose name has @\nlatexParser parse ‑ parse a^b\nlatexParser parse ‑ parse a_b\nlatexParser parse ‑ parse an optional argument having only spaces\nlatexParser parse ‑ parse comments\nlatexParser parse ‑ parse empty preamble\nlatexParser parse ‑ parse invalid commands without error\nlatexParser parse ‑ parse newenvironment command\nlatexParser parse ‑ parse optional arguments having a tilde\nlatexParser parse ‑ parse optional arguments having spaces\nlatexParser parse ‑ parse preamble\nlatexParser parse ‑ parse unbalanced \\begin\nlatexParser parse ‑ parse unbalanced \\begin{aligned}\nlatexParser parse ‑ parse unbalanced \\end\nlatexParser parse ‑ parse unbalanced \\end{aligned}\nlatexParser parse ‑ parse { }, including only 
spaces\nlatexParser parse ‑ parse ~\nlatexParser parse ‑ should throw SyntaxError\nlatexParser stringify ‑ test latexParser.stringify a b\nlatexParser stringify ‑ test latexParser.stringify a b\nlatexParser stringify ‑ test latexParser.stringify a\\nb\nlatexParser stringify ‑ test latexParser.stringify a_b\nlatexParser stringify ‑ test latexParser.stringify newcommand 01\nlatexParser stringify ‑ test latexParser.stringify newcommand 02\nlatexParser stringify ‑ test latexParser.stringify newcommand 03\nlatexParser stringify ‑ test latexParser.stringify with lineBreak 01\nlatexParser stringify ‑ test latexParser.stringify with lineBreak 02\nlatexParser stringify ‑ test stringify $ \\sin x$\nlatexParser stringify ‑ test stringify $a^b$\nlatexParser stringify ‑ test stringify \\def\\abc [#1]#2 {#2#1abc}\nlatexParser stringify ‑ test stringify \\href[]{}{}\nlatexParser stringify ‑ test stringify \\href{}{}\nlatexParser stringify ‑ test stringify \\url\nlatexParser stringify ‑ test stringify a_b" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "0", + "tests": "101", + "tests_succ": "101", + "tests_skip": "0", + "tests_fail": "0", + "tests_error": "0", + "runs": "109", + "runs_succ": "109", + "runs_skip": "0", + "runs_fail": "0", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/no-attributes.annotations b/python/test/files/junit-xml/no-attributes.annotations index 40ace0fc..b06aa7a9 100644 --- a/python/test/files/junit-xml/no-attributes.annotations +++ b/python/test/files/junit-xml/no-attributes.annotations @@ -7,21 +7,9 @@ 'output': { 'title': '1 errors, 1 fail, 1 skipped, 1 pass in 0s', 'summary': - '4 tests\u2002\u2003\u20031 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - 
'[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '1 suites\u2003\u20031 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\u2003\u20031 ' - '[:fire:](https://github.com/step-security/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '4 tests\u2002\u2003\u20031 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\u2003\u20031 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLTSx8jKGIMSNfMwClfHuAoJC92Z2MxeVoISjC5kGQl0A/8EWkHmwJuIYMR58Os11ry' '5wXn6LOODshGSgOiEQLRaDwdRemm3u5b+WuYllblvcag0+QlnE7YzeD8XajRvdAAAA\n', diff --git a/python/test/files/junit-xml/no-attributes.results.json b/python/test/files/junit-xml/no-attributes.results.json new file mode 100644 index 00000000..309f4237 --- /dev/null +++ b/python/test/files/junit-xml/no-attributes.results.json @@ -0,0 +1,43 @@ +{ + "title": "1 errors, 1 fail, 1 skipped, 1 pass in 0s", + "summary": "4 tests   1 :white_check_mark:  0s :stopwatch:\n1 suites  1 :zzz:\n1 files    1 :x:  1 :fire:\n\nResults for commit commit s.\n", + "conclusion": "failure", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 0, + "tests": 4, + "tests_succ": 1, + "tests_skip": 1, + "tests_fail": 1, + "tests_error": 1, + "runs": 4, + "runs_succ": 1, + "runs_skip": 1, + "runs_fail": 1, + "runs_error": 1, + "commit": "commit sha" + }, + "annotations": [], + "check_url": "html", + 
"formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "0", + "tests": "4", + "tests_succ": "1", + "tests_skip": "1", + "tests_fail": "1", + "tests_error": "1", + "runs": "4", + "runs_succ": "1", + "runs_skip": "1", + "runs_fail": "1", + "runs_error": "1", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/no-cases-but-tests.annotations b/python/test/files/junit-xml/no-cases-but-tests.annotations index 5bb2fd92..0f39fd65 100644 --- a/python/test/files/junit-xml/no-cases-but-tests.annotations +++ b/python/test/files/junit-xml/no-cases-but-tests.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 2 skipped, 3 pass in 0s', 'summary': - '6 tests\u2002\u2003\u20033 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '1 suites\u2003\u20032 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '6 tests\u2002\u2003\u20033 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20032 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBAFr' '0KoLfwkFl7GEJS4UcEsUBnv7spH6N7MS+bmCo7V8ol1DePWg/th8SgcGE3YEtLhvmvMe7' 'ZeShJDETtcJPpfKAFHqkWxIhpMQfQ6975Z5yKXWuAqFrhuSXOe4AjSYnYT/HkBNCXSZd0' diff --git 
a/python/test/files/junit-xml/no-cases-but-tests.results.json b/python/test/files/junit-xml/no-cases-but-tests.results.json new file mode 100644 index 00000000..6c54ecf2 --- /dev/null +++ b/python/test/files/junit-xml/no-cases-but-tests.results.json @@ -0,0 +1,43 @@ +{ + "title": "1 fail, 2 skipped, 3 pass in 0s", + "summary": "6 tests   3 :white_check_mark:  0s :stopwatch:\n1 suites  2 :zzz:\n1 files    1 :x:\n\nResults for commit commit s.\n", + "conclusion": "failure", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 0, + "tests": 6, + "tests_succ": 3, + "tests_skip": 2, + "tests_fail": 1, + "tests_error": 0, + "runs": 6, + "runs_succ": 3, + "runs_skip": 2, + "runs_fail": 1, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "0", + "tests": "6", + "tests_succ": "3", + "tests_skip": "2", + "tests_fail": "1", + "tests_error": "0", + "runs": "6", + "runs_succ": "3", + "runs_skip": "2", + "runs_fail": "1", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/no-cases.annotations b/python/test/files/junit-xml/no-cases.annotations index e49dc25c..8ce031e6 100644 --- a/python/test/files/junit-xml/no-cases.annotations +++ b/python/test/files/junit-xml/no-cases.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'No tests found', 'summary': - '0 tests\u2002\u2003\u20030 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '1 suites\u2003\u20030 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 
'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '0 tests\u2002\u2003\u20030 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0K2ttDWyxiCEDfyMbtQGe8uQaNL92ZeMic49JZhVtOggAvmD9ZCOmOKFceK9cgs98LFmF' '7seHTCafSdsESJXkMlspgy9/BfayxijWXLpBAwV3iX4k3DdQOuuvQ/3QAAAA==\n', diff --git a/python/test/files/junit-xml/no-cases.results.json b/python/test/files/junit-xml/no-cases.results.json new file mode 100644 index 00000000..60a67d1f --- /dev/null +++ b/python/test/files/junit-xml/no-cases.results.json @@ -0,0 +1,43 @@ +{ + "title": "No tests found", + "summary": "0 tests   0 :white_check_mark:  0s :stopwatch:\n1 suites  0 :zzz:\n1 files    0 :x:\n\nResults for commit commit s.\n", + "conclusion": "success", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 0, + "tests": 0, + "tests_succ": 0, + "tests_skip": 0, + "tests_fail": 0, + "tests_error": 0, + "runs": 0, + "runs_succ": 0, + "runs_skip": 0, + "runs_fail": 0, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "0", + "tests": "0", + "tests_succ": "0", + "tests_skip": "0", + "tests_fail": "0", + "tests_error": "0", + "runs": "0", + "runs_succ": "0", + "runs_skip": "0", + "runs_fail": "0", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/non-junit.annotations b/python/test/files/junit-xml/non-junit.annotations index 6e1249ae..28635d0b 100644 --- 
a/python/test/files/junit-xml/non-junit.annotations +++ b/python/test/files/junit-xml/non-junit.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 parse errors', 'summary': - '0 tests\u2002\u2003\u20030 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '0 suites\u2003\u20030 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n1 errors\n\n' - 'Results for commit commit s.\n\n' + '0 tests\u2002\u2003\u20030 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n0 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n1 errors\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMywqAIBBFf' '0Vm3aK2/UyIKQ35iBldRf+emNC4u+dcODc49JZhVcukgAvmBnOFvZDOmGLHemSWe+NizC' 'hOvAbhNPpBWKJE3VCJLKbMffzXGotYY9kyKQTMFfpSfGh4XnRU87HdAAAA\n', diff --git a/python/test/files/junit-xml/non-junit.results.json b/python/test/files/junit-xml/non-junit.results.json new file mode 100644 index 00000000..e55175ea --- /dev/null +++ b/python/test/files/junit-xml/non-junit.results.json @@ -0,0 +1,63 @@ +{ + "title": "1 parse errors", + "summary": "0 tests   0 :white_check_mark:  0s :stopwatch:\n0 suites  0 :zzz:\n1 files    0 :x:\n1 errors\n\nResults for commit commit s.\n", + "conclusion": "failure", + "stats": { + "files": 1, + "errors": [ + { + "file": "non-junit.xml", + "message": "Invalid format." 
+ } + ], + "suites": 0, + "duration": 0, + "tests": 0, + "tests_succ": 0, + "tests_skip": 0, + "tests_fail": 0, + "tests_error": 0, + "runs": 0, + "runs_succ": 0, + "runs_skip": 0, + "runs_fail": 0, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": "non-junit.xml", + "start_line": 0, + "end_line": 0, + "annotation_level": "failure", + "message": "Invalid format.", + "title": "Error processing result file", + "raw_details": "non-junit.xml" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [ + { + "file": "non-junit.xml", + "message": "Invalid format." + } + ], + "suites": "0", + "duration": "0", + "tests": "0", + "tests_succ": "0", + "tests_skip": "0", + "tests_fail": "0", + "tests_error": "0", + "runs": "0", + "runs_succ": "0", + "runs_skip": "0", + "runs_fail": "0", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/pytest/junit.fail.annotations b/python/test/files/junit-xml/pytest/junit.fail.annotations index c19a554d..969db188 100644 --- a/python/test/files/junit-xml/pytest/junit.fail.annotations +++ b/python/test/files/junit-xml/pytest/junit.fail.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 1 skipped, 3 pass in 2s', 'summary': - '5 tests\u2002\u2003\u20033 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '2s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '1 suites\u2003\u20031 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - 
'[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '5 tests\u2002\u2003\u20033 :white_check_mark:\u2003\u20032s ' + ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLfzExssYghA3IpgFKuPdXVEUuzez2dm5BqM8H1hTMe4jhBemiCKAs4QtIR3CderzHn' '2UkkT3iQW25/kWWoD5CYXokExNBqPNvWuWuZu/WuIilrhsSbeuEAiexfws+HECiWEEJ90' diff --git a/python/test/files/junit-xml/pytest/junit.fail.results.json b/python/test/files/junit-xml/pytest/junit.fail.results.json new file mode 100644 index 00000000..78f353d8 --- /dev/null +++ b/python/test/files/junit-xml/pytest/junit.fail.results.json @@ -0,0 +1,71 @@ +{ + "title": "1 fail, 1 skipped, 3 pass in 2s", + "summary": "5 tests   3 :white_check_mark:  2s :stopwatch:\n1 suites  1 :zzz:\n1 files    1 :x:\n\nResults for commit commit s.\n", + "conclusion": "failure", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 2, + "tests": 5, + "tests_succ": 3, + "tests_skip": 1, + "tests_fail": 1, + "tests_error": 0, + "runs": 5, + "runs_succ": 3, + "runs_skip": 1, + "runs_fail": 1, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": "test/test_spark.py", + "start_line": 819, + "end_line": 819, + "annotation_level": "warning", + "message": "pytest/junit.fail.xml [took 7s]", + "title": "test_rsh_events (test.test_spark.SparkTests) failed", + "raw_details": "self = \n\n def test_rsh_events(self):\n > self.do_test_rsh_events(3)\n\n test_spark.py:821:\n _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _\n test_spark.py:836: in do_test_rsh_events\n self.do_test_rsh(command, 143, events=events)\n test_spark.py:852: in do_test_rsh\n self.assertEqual(expected_result, res)\n E AssertionError: 143 != 0" + }, + 
{ + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There is 1 skipped test, see \"Raw output\" for the name of the skipped test.", + "title": "1 skipped test found", + "raw_details": "test.test_spark.SparkTests ‑ test_get_available_devices" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 5 tests, see \"Raw output\" for the full list of tests.", + "title": "5 tests found", + "raw_details": "test.test_spark.SparkTests ‑ test_check_shape_compatibility\ntest.test_spark.SparkTests ‑ test_get_available_devices\ntest.test_spark.SparkTests ‑ test_get_col_info\ntest.test_spark.SparkTests ‑ test_rsh_events\ntest.test_spark.SparkTests ‑ test_rsh_with_non_zero_exit_code" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "2", + "tests": "5", + "tests_succ": "3", + "tests_skip": "1", + "tests_fail": "1", + "tests_error": "0", + "runs": "5", + "runs_succ": "3", + "runs_skip": "1", + "runs_fail": "1", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.annotations b/python/test/files/junit-xml/pytest/junit.gloo.elastic.annotations index 0c3f8c23..d8c15b5c 100644 --- a/python/test/files/junit-xml/pytest/junit.gloo.elastic.annotations +++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 10 tests pass, 4 skipped in 1m 12s', 'summary': - '14 tests\u2002\u2003\u200310 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '1m 12s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - 
'\u205f\u20041 suites\u2003\u2003\u205f\u20044 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '14 tests\u2002\u2003\u200310 :white_check_mark:\u2003\u20031m 12s ' + ':stopwatch:\n\u20071 suites\u2003\u2003\u20074 :zzz:\n\u20071 files\u2004\u2002' + '\u2003\u2003\u20070 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMQQqAIBBFr' 'yKuW1QEQZcJMaMhzRh1Fd290SxrN+/94R18Bq0cH1hTMe4C+BemgMKD3Qj7lpgWn7bugd' 'EFKaOpi1lhJ1NeZgGaRPlQiBazwbC9xXj/grcovcSfXOJvTVpjwBPki7lF8PMCyjZFT+I' diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.results.json b/python/test/files/junit-xml/pytest/junit.gloo.elastic.results.json new file mode 100644 index 00000000..e25ad040 --- /dev/null +++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.results.json @@ -0,0 +1,62 @@ +{ + "title": "All 10 tests pass, 4 skipped in 1m 12s", + "summary": "14 tests   10 :white_check_mark:  1m 12s :stopwatch:\n 1 suites   4 :zzz:\n 1 files     0 :x:\n\nResults for commit commit s.\n", + "conclusion": "success", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 72, + "tests": 14, + "tests_succ": 10, + "tests_skip": 4, + "tests_fail": 0, + "tests_error": 0, + "runs": 14, + "runs_succ": 10, + "runs_skip": 4, + "runs_fail": 0, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 4 skipped tests, see \"Raw output\" for the full list of skipped tests.", + "title": "4 skipped tests found", + "raw_details": 
"test.integration.test_elastic_tensorflow.ElasticTensorFlowTests ‑ test_all_hosts_blacklisted\ntest.integration.test_elastic_tensorflow.ElasticTensorFlowTests ‑ test_min_hosts_timeout\ntest.integration.test_elastic_torch.ElasticTorchTests ‑ test_all_hosts_blacklisted\ntest.integration.test_elastic_torch.ElasticTorchTests ‑ test_min_hosts_timeout" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 14 tests, see \"Raw output\" for the full list of tests.", + "title": "14 tests found", + "raw_details": "test.integration.test_elastic_tensorflow.ElasticTensorFlowTests ‑ test_all_hosts_blacklisted\ntest.integration.test_elastic_tensorflow.ElasticTensorFlowTests ‑ test_all_ranks_failure\ntest.integration.test_elastic_tensorflow.ElasticTensorFlowTests ‑ test_fault_tolerance_without_scaling\ntest.integration.test_elastic_tensorflow.ElasticTensorFlowTests ‑ test_hosts_added_and_removed\ntest.integration.test_elastic_tensorflow.ElasticTensorFlowTests ‑ test_min_hosts_timeout\ntest.integration.test_elastic_tensorflow.ElasticTensorFlowTests ‑ test_reset_limit\ntest.integration.test_elastic_tensorflow.ElasticTensorFlowTests ‑ test_single_rank_failure\ntest.integration.test_elastic_torch.ElasticTorchTests ‑ test_all_hosts_blacklisted\ntest.integration.test_elastic_torch.ElasticTorchTests ‑ test_all_ranks_failure\ntest.integration.test_elastic_torch.ElasticTorchTests ‑ test_fault_tolerance_without_scaling\ntest.integration.test_elastic_torch.ElasticTorchTests ‑ test_hosts_added_and_removed\ntest.integration.test_elastic_torch.ElasticTorchTests ‑ test_min_hosts_timeout\ntest.integration.test_elastic_torch.ElasticTorchTests ‑ test_reset_limit\ntest.integration.test_elastic_torch.ElasticTorchTests ‑ test_single_rank_failure" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "72", + "tests": "14", + "tests_succ": "10", + "tests_skip": "4", + 
"tests_fail": "0", + "tests_error": "0", + "runs": "14", + "runs_succ": "10", + "runs_skip": "4", + "runs_fail": "0", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.annotations b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.annotations index 7de60202..19251c83 100644 --- a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.annotations +++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 20 tests pass, 2 skipped in 10m 27s', 'summary': - '22 tests\u2002\u2003\u200320 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '10m 27s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '\u205f\u20041 suites\u2003\u2003\u205f\u20042 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '22 tests\u2002\u2003\u200320 :white_check_mark:\u2003\u200310m 27s ' + ':stopwatch:\n\u20071 suites\u2003\u2003\u20072 :zzz:\n\u20071 files\u2004\u2002' + '\u2003\u2003\u20070 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIAxFr' '0KYHZRBEy9jCGpsFDAFJuPdLaigW9/7zTv4DNvkeM+ainEXwGcYA0oP1hC2oiNBk4+jEC' '8MLigVTV3MCns0WcwSNhLlY0K0+BgMJhfj/QveovQSf3KJvzVltQZP8FzMLZKfF82Ojyn' diff --git 
a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.results.json b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.results.json new file mode 100644 index 00000000..e648c19a --- /dev/null +++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.results.json @@ -0,0 +1,62 @@ +{ + "title": "All 20 tests pass, 2 skipped in 10m 27s", + "summary": "22 tests   20 :white_check_mark:  10m 27s :stopwatch:\n 1 suites   2 :zzz:\n 1 files     0 :x:\n\nResults for commit commit s.\n", + "conclusion": "success", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 627, + "tests": 22, + "tests_succ": 20, + "tests_skip": 2, + "tests_fail": 0, + "tests_error": 0, + "runs": 22, + "runs_succ": 20, + "runs_skip": 2, + "runs_fail": 0, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 2 skipped tests, see \"Raw output\" for the full list of skipped tests.", + "title": "2 skipped tests found", + "raw_details": "test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests ‑ test_auto_scale_down_by_discovery\ntest.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests ‑ test_fault_tolerance_hosts_added_and_removed" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 22 tests, see \"Raw output\" for the full list of tests.", + "title": "22 tests found", + "raw_details": "test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests ‑ test_auto_scale_down_by_discovery\ntest.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests ‑ test_auto_scale_down_by_exception\ntest.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests ‑ test_auto_scale_no_spark_black_list\ntest.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests ‑ 
test_auto_scale_spark_blacklist_no_executor_reuse\ntest.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests ‑ test_auto_scale_spark_blacklist_no_executor_reuse_in_app\ntest.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests ‑ test_auto_scale_spark_blacklist_no_executor_reuse_same_task\ntest.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests ‑ test_auto_scale_spark_blacklist_no_node_reuse\ntest.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests ‑ test_auto_scale_spark_blacklist_no_node_reuse_in_app\ntest.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests ‑ test_auto_scale_up\ntest.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests ‑ test_fault_tolerance_all_hosts_lost\ntest.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests ‑ test_fault_tolerance_exception_all_ranks\ntest.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests ‑ test_fault_tolerance_exception_single_rank\ntest.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests ‑ test_fault_tolerance_exception_with_min_hosts_timeout\ntest.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests ‑ test_fault_tolerance_hosts_added_and_removed\ntest.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests ‑ test_fault_tolerance_no_spark_blacklist\ntest.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests ‑ test_fault_tolerance_spark_blacklist_no_executor_reuse\ntest.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests ‑ test_fault_tolerance_spark_blacklist_no_executor_reuse_in_app\ntest.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests ‑ test_fault_tolerance_spark_blacklist_no_executor_reuse_same_task\ntest.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests ‑ 
test_fault_tolerance_spark_blacklist_no_node_reuse\ntest.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests ‑ test_fault_tolerance_spark_blacklist_no_node_reuse_in_app\ntest.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests ‑ test_fault_tolerance_unused_hosts_added_and_removed\ntest.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests ‑ test_happy_run" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "627", + "tests": "22", + "tests_succ": "20", + "tests_skip": "2", + "tests_fail": "0", + "tests_error": "0", + "runs": "22", + "runs_succ": "20", + "runs_skip": "2", + "runs_fail": "0", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.annotations b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.annotations index d23a4c5c..ed382cfd 100644 --- a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.annotations +++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 22 tests pass in 11m 10s', 'summary': - '22 tests\u2002\u2003\u200322 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '11m 10s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '\u205f\u20041 suites\u2003\u2003\u205f\u20040 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - 
'[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '22 tests\u2002\u2003\u200322 :white_check_mark:\u2003\u200311m 10s ' + ':stopwatch:\n\u20071 suites\u2003\u2003\u20070 :zzz:\n\u20071 files\u2004\u2002' + '\u2003\u2003\u20070 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MOw6AIBAFr' '0KoLdRCEy9jCErc+MEsUBnv7oKIYrczbzMHV7CMhnesKhg3DmyCwaGwoDfCpi1J0GT9WN' 'cP9MZJ+TMz7GTSf68ELJkYETVGg25LRX9nwVu8vcCfXOBvTep1BUsQL2Ymwc8LUe9HxOM' diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.results.json b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.results.json new file mode 100644 index 00000000..50f3c059 --- /dev/null +++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.results.json @@ -0,0 +1,53 @@ +{ + "title": "All 22 tests pass in 11m 10s", + "summary": "22 tests   22 :white_check_mark:  11m 10s :stopwatch:\n 1 suites   0 :zzz:\n 1 files     0 :x:\n\nResults for commit commit s.\n", + "conclusion": "success", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 670, + "tests": 22, + "tests_succ": 22, + "tests_skip": 0, + "tests_fail": 0, + "tests_error": 0, + "runs": 22, + "runs_succ": 22, + "runs_skip": 0, + "runs_fail": 0, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 22 tests, see \"Raw output\" for the full list of tests.", + "title": "22 tests found", + "raw_details": "test.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ test_auto_scale_down_by_discovery\ntest.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ test_auto_scale_down_by_exception\ntest.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ 
test_auto_scale_no_spark_black_list\ntest.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ test_auto_scale_spark_blacklist_no_executor_reuse\ntest.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ test_auto_scale_spark_blacklist_no_executor_reuse_in_app\ntest.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ test_auto_scale_spark_blacklist_no_executor_reuse_same_task\ntest.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ test_auto_scale_spark_blacklist_no_node_reuse\ntest.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ test_auto_scale_spark_blacklist_no_node_reuse_in_app\ntest.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ test_auto_scale_up\ntest.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ test_fault_tolerance_all_hosts_lost\ntest.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ test_fault_tolerance_exception_all_ranks\ntest.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ test_fault_tolerance_exception_single_rank\ntest.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ test_fault_tolerance_exception_with_min_hosts_timeout\ntest.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ test_fault_tolerance_hosts_added_and_removed\ntest.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ test_fault_tolerance_no_spark_blacklist\ntest.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ test_fault_tolerance_spark_blacklist_no_executor_reuse\ntest.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ test_fault_tolerance_spark_blacklist_no_executor_reuse_in_app\ntest.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ test_fault_tolerance_spark_blacklist_no_executor_reuse_same_task\ntest.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ test_fault_tolerance_spark_blacklist_no_node_reuse\ntest.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ 
test_fault_tolerance_spark_blacklist_no_node_reuse_in_app\ntest.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ test_fault_tolerance_unused_hosts_added_and_removed\ntest.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ test_happy_run" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "670", + "tests": "22", + "tests_succ": "22", + "tests_skip": "0", + "tests_fail": "0", + "tests_error": "0", + "runs": "22", + "runs_succ": "22", + "runs_skip": "0", + "runs_fail": "0", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/pytest/junit.gloo.standalone.annotations b/python/test/files/junit-xml/pytest/junit.gloo.standalone.annotations index 6489a016..d122405b 100644 --- a/python/test/files/junit-xml/pytest/junit.gloo.standalone.annotations +++ b/python/test/files/junit-xml/pytest/junit.gloo.standalone.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 80 tests pass, 17 skipped in 3m 25s', 'summary': - '97 tests\u2002\u2003\u200380 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '3m 25s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '\u205f\u20041 suites\u2003\u200317 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '97 tests\u2002\u2003\u200380 :white_check_mark:\u2003\u20033m 25s ' 
+ ':stopwatch:\n\u20071 suites\u2003\u200317 :zzz:\n\u20071 files\u2004\u2002\u2003\u2003\u2007' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMTQqAIBBGr' 'yKuW1QQ/VwmxIqGSmPUVXT3xrK03bz3De/gE6yj4R0rMsaNA/vB4FBY0IqwzCsSNFk/tv' 'ULvXFSkmnyaBbYfSD+TAJWMvFlRNQYDDr1Jf39Kz4iCd4i6d2c5qTeNrAE4WJmFvy8ADN' diff --git a/python/test/files/junit-xml/pytest/junit.gloo.standalone.results.json b/python/test/files/junit-xml/pytest/junit.gloo.standalone.results.json new file mode 100644 index 00000000..1c12ddb8 --- /dev/null +++ b/python/test/files/junit-xml/pytest/junit.gloo.standalone.results.json @@ -0,0 +1,62 @@ +{ + "title": "All 80 tests pass, 17 skipped in 3m 25s", + "summary": "97 tests   80 :white_check_mark:  3m 25s :stopwatch:\n 1 suites  17 :zzz:\n 1 files     0 :x:\n\nResults for commit commit s.\n", + "conclusion": "success", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 205, + "tests": 97, + "tests_succ": 80, + "tests_skip": 17, + "tests_fail": 0, + "tests_error": 0, + "runs": 97, + "runs_succ": 80, + "runs_skip": 17, + "runs_fail": 0, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 17 skipped tests, see \"Raw output\" for the full list of skipped tests.", + "title": "17 skipped tests found", + "raw_details": "test.test_run.RunTests ‑ test_js_run\ntest.test_run.RunTests ‑ test_mpi_run_full\ntest.test_run.RunTests ‑ test_mpi_run_minimal\ntest.test_run.RunTests ‑ test_mpi_run_on_large_cluster\ntest.test_run.RunTests ‑ test_mpi_run_with_both_paths\ntest.test_run.RunTests ‑ test_mpi_run_with_both_pythonpaths\ntest.test_run.RunTests ‑ test_mpi_run_with_env_path\ntest.test_run.RunTests ‑ test_mpi_run_with_env_pythonpath\ntest.test_run.RunTests ‑ test_mpi_run_with_non_zero_exit\ntest.test_run.RunTests ‑ test_mpi_run_with_os_environ\ntest.test_run.RunTests 
‑ test_mpi_run_with_sys_path\ntest.test_run.RunTests ‑ test_mpi_run_with_sys_pythonpath\ntest.test_run.RunTests ‑ test_mpi_run_without_path\ntest.test_run.RunTests ‑ test_mpi_run_without_pythonpath\ntest.test_spark.SparkTests ‑ test_get_available_devices\ntest.test_spark.SparkTests ‑ test_happy_run_with_mpi\ntest.test_spark.SparkTests ‑ test_timeout_with_mpi" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 97 tests, see \"Raw output\" for the full list of tests.", + "title": "97 tests found", + "raw_details": "test.test_run.RunTests ‑ test_autotune_args\ntest.test_run.RunTests ‑ test_autotuning_with_fixed_param\ntest.test_run.RunTests ‑ test_config_file\ntest.test_run.RunTests ‑ test_config_file_override_args\ntest.test_run.RunTests ‑ test_generate_jsrun_rankfile\ntest.test_run.RunTests ‑ test_get_mpi_implementation\ntest.test_run.RunTests ‑ test_gloo_run_minimal\ntest.test_run.RunTests ‑ test_gloo_run_with_os_environ\ntest.test_run.RunTests ‑ test_hash\ntest.test_run.RunTests ‑ test_horovodrun_hostfile\ntest.test_run.RunTests ‑ test_host_hash\ntest.test_run.RunTests ‑ test_in_thread_args\ntest.test_run.RunTests ‑ test_js_run\ntest.test_run.RunTests ‑ test_library_args\ntest.test_run.RunTests ‑ test_logging_args\ntest.test_run.RunTests ‑ test_mpi_run_full\ntest.test_run.RunTests ‑ test_mpi_run_minimal\ntest.test_run.RunTests ‑ test_mpi_run_on_large_cluster\ntest.test_run.RunTests ‑ test_mpi_run_with_both_paths\ntest.test_run.RunTests ‑ test_mpi_run_with_both_pythonpaths\ntest.test_run.RunTests ‑ test_mpi_run_with_env_path\ntest.test_run.RunTests ‑ test_mpi_run_with_env_pythonpath\ntest.test_run.RunTests ‑ test_mpi_run_with_non_zero_exit\ntest.test_run.RunTests ‑ test_mpi_run_with_os_environ\ntest.test_run.RunTests ‑ test_mpi_run_with_sys_path\ntest.test_run.RunTests ‑ test_mpi_run_with_sys_pythonpath\ntest.test_run.RunTests ‑ test_mpi_run_without_path\ntest.test_run.RunTests ‑ 
test_mpi_run_without_pythonpath\ntest.test_run.RunTests ‑ test_on_event\ntest.test_run.RunTests ‑ test_params_args\ntest.test_run.RunTests ‑ test_run_controller\ntest.test_run.RunTests ‑ test_run_with_jsrun\ntest.test_run.RunTests ‑ test_safe_shell_exec_captures_last_line_wo_eol\ntest.test_run.RunTests ‑ test_safe_shell_exec_captures_stderr\ntest.test_run.RunTests ‑ test_safe_shell_exec_captures_stdout\ntest.test_run.RunTests ‑ test_safe_shell_exec_interrupts_on_event\ntest.test_run.RunTests ‑ test_safe_shell_exec_interrupts_on_parent_shutdown\ntest.test_run.RunTests ‑ test_safe_shell_exec_returns_exit_code\ntest.test_run.RunTests ‑ test_stall_check_args\ntest.test_run.RunTests ‑ test_timeline_args\ntest.test_run.RunTests ‑ test_validate_config_args\ntest.test_spark.SparkTests ‑ test_check_shape_compatibility\ntest.test_spark.SparkTests ‑ test_df_cache\ntest.test_spark.SparkTests ‑ test_driver_common_interfaces\ntest.test_spark.SparkTests ‑ test_driver_common_interfaces_fails\ntest.test_spark.SparkTests ‑ test_driver_common_interfaces_from_settings\ntest.test_spark.SparkTests ‑ test_driver_set_local_rank_to_index\ntest.test_spark.SparkTests ‑ test_get_available_devices\ntest.test_spark.SparkTests ‑ test_get_col_info\ntest.test_spark.SparkTests ‑ test_get_col_info_error_bad_shape\ntest.test_spark.SparkTests ‑ test_get_col_info_error_bad_size\ntest.test_spark.SparkTests ‑ test_get_metadata\ntest.test_spark.SparkTests ‑ test_gloo_exec_fn\ntest.test_spark.SparkTests ‑ test_gloo_exec_fn_provides_driver_with_local_rank\ntest.test_spark.SparkTests ‑ test_happy_run_elastic\ntest.test_spark.SparkTests ‑ test_happy_run_with_gloo\ntest.test_spark.SparkTests ‑ test_happy_run_with_mpi\ntest.test_spark.SparkTests ‑ test_hdfs_store_parse_url\ntest.test_spark.SparkTests ‑ test_host_hash\ntest.test_spark.SparkTests ‑ test_mpi_exec_fn_provides_driver_with_local_rank\ntest.test_spark.SparkTests ‑ test_mpirun_exec_fn\ntest.test_spark.SparkTests ‑ 
test_mpirun_not_found\ntest.test_spark.SparkTests ‑ test_prepare_data_compress_sparse\ntest.test_spark.SparkTests ‑ test_prepare_data_no_compression\ntest.test_spark.SparkTests ‑ test_rsh_event\ntest.test_spark.SparkTests ‑ test_rsh_events\ntest.test_spark.SparkTests ‑ test_rsh_with_non_zero_exit_code\ntest.test_spark.SparkTests ‑ test_rsh_with_zero_exit_code\ntest.test_spark.SparkTests ‑ test_spark_driver_host_discovery\ntest.test_spark.SparkTests ‑ test_spark_run_defaults_num_proc_to_spark_cores_with_gloo\ntest.test_spark.SparkTests ‑ test_spark_run_defaults_num_proc_to_spark_cores_with_mpi\ntest.test_spark.SparkTests ‑ test_spark_run_does_not_default_env_to_os_env_with_gloo\ntest.test_spark.SparkTests ‑ test_spark_run_does_not_default_env_to_os_env_with_mpi\ntest.test_spark.SparkTests ‑ test_spark_run_num_proc_precedes_spark_cores_with_gloo\ntest.test_spark.SparkTests ‑ test_spark_run_num_proc_precedes_spark_cores_with_mpi\ntest.test_spark.SparkTests ‑ test_spark_run_with_gloo\ntest.test_spark.SparkTests ‑ test_spark_run_with_mpi\ntest.test_spark.SparkTests ‑ test_spark_run_with_non_zero_exit_with_gloo\ntest.test_spark.SparkTests ‑ test_spark_run_with_non_zero_exit_with_mpi\ntest.test_spark.SparkTests ‑ test_spark_run_with_os_environ_with_mpi\ntest.test_spark.SparkTests ‑ test_spark_run_with_path_with_mpi\ntest.test_spark.SparkTests ‑ test_spark_task_service_abort_command\ntest.test_spark.SparkTests ‑ test_spark_task_service_abort_no_command\ntest.test_spark.SparkTests ‑ test_spark_task_service_env\ntest.test_spark.SparkTests ‑ test_spark_task_service_execute_command\ntest.test_spark.SparkTests ‑ test_sync_hdfs_store\ntest.test_spark.SparkTests ‑ test_task_fn_run_commands\ntest.test_spark.SparkTests ‑ test_task_fn_run_gloo_exec\ntest.test_spark.SparkTests ‑ test_task_service_check_for_command_start\ntest.test_spark.SparkTests ‑ test_task_service_wait_for_command_start_with_timeout\ntest.test_spark.SparkTests ‑ 
test_task_service_wait_for_command_start_without_timeout\ntest.test_spark.SparkTests ‑ test_timeout_with_gloo\ntest.test_spark.SparkTests ‑ test_timeout_with_mpi\ntest.test_spark.SparkTests ‑ test_to_list\ntest.test_spark.SparkTests ‑ test_train_val_split_col_boolean\ntest.test_spark.SparkTests ‑ test_train_val_split_col_integer\ntest.test_spark.SparkTests ‑ test_train_val_split_ratio" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "205", + "tests": "97", + "tests_succ": "80", + "tests_skip": "17", + "tests_fail": "0", + "tests_error": "0", + "runs": "97", + "runs_succ": "80", + "runs_skip": "17", + "runs_fail": "0", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/pytest/junit.gloo.static.annotations b/python/test/files/junit-xml/pytest/junit.gloo.static.annotations index b34fc4c6..951ee896 100644 --- a/python/test/files/junit-xml/pytest/junit.gloo.static.annotations +++ b/python/test/files/junit-xml/pytest/junit.gloo.static.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 12 tests pass, 12 skipped in 1m 9s', 'summary': - '24 tests\u2002\u2003\u200312 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '1m 9s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '\u205f\u20041 suites\u2003\u200312 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed 
tests")\n\nResults for ' - 'commit commit s.\n\n' + '24 tests\u2002\u2003\u200312 :white_check_mark:\u2003\u20031m 9s ' + ':stopwatch:\n\u20071 suites\u2003\u200312 :zzz:\n\u20071 files\u2004\u2002\u2003\u2003\u2007' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMTQqAIBBGr' 'yKuW1REUJcJsaQhzRh1Fd29sR+z3bz3DW/nCvTkeM+qgnEXwCcYAwoPdiVsO2JafNzq5o' 'XBBSnjd/2ZBba/UQI0mTKJCdHiYzCsKRnvX/EWWfASWe/iPCetMeAJnou5WfDjBP7Rpw/' diff --git a/python/test/files/junit-xml/pytest/junit.gloo.static.results.json b/python/test/files/junit-xml/pytest/junit.gloo.static.results.json new file mode 100644 index 00000000..c6ef7e60 --- /dev/null +++ b/python/test/files/junit-xml/pytest/junit.gloo.static.results.json @@ -0,0 +1,62 @@ +{ + "title": "All 12 tests pass, 12 skipped in 1m 9s", + "summary": "24 tests   12 :white_check_mark:  1m 9s :stopwatch:\n 1 suites  12 :zzz:\n 1 files     0 :x:\n\nResults for commit commit s.\n", + "conclusion": "success", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 69, + "tests": 24, + "tests_succ": 12, + "tests_skip": 12, + "tests_fail": 0, + "tests_error": 0, + "runs": 24, + "runs_succ": 12, + "runs_skip": 12, + "runs_fail": 0, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 12 skipped tests, see \"Raw output\" for the full list of skipped tests.", + "title": "12 skipped tests found", + "raw_details": "test.integration.test_static_run.StaticRunTests ‑ test_run_failure_mpi_local_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_failure_mpi_local_func\ntest.integration.test_static_run.StaticRunTests ‑ test_run_failure_mpi_mixed_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_failure_mpi_mixed_func\ntest.integration.test_static_run.StaticRunTests ‑ 
test_run_failure_mpi_remote_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_failure_mpi_remote_func\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_mpi_local_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_mpi_local_func\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_mpi_mixed_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_mpi_mixed_func\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_mpi_remote_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_mpi_remote_func" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 24 tests, see \"Raw output\" for the full list of tests.", + "title": "24 tests found", + "raw_details": "test.integration.test_static_run.StaticRunTests ‑ test_run_failure_gloo_local_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_failure_gloo_local_func\ntest.integration.test_static_run.StaticRunTests ‑ test_run_failure_gloo_mixed_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_failure_gloo_mixed_func\ntest.integration.test_static_run.StaticRunTests ‑ test_run_failure_gloo_remote_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_failure_gloo_remote_func\ntest.integration.test_static_run.StaticRunTests ‑ test_run_failure_mpi_local_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_failure_mpi_local_func\ntest.integration.test_static_run.StaticRunTests ‑ test_run_failure_mpi_mixed_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_failure_mpi_mixed_func\ntest.integration.test_static_run.StaticRunTests ‑ test_run_failure_mpi_remote_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_failure_mpi_remote_func\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_gloo_local_cmd\ntest.integration.test_static_run.StaticRunTests ‑ 
test_run_success_gloo_local_func\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_gloo_mixed_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_gloo_mixed_func\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_gloo_remote_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_gloo_remote_func\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_mpi_local_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_mpi_local_func\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_mpi_mixed_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_mpi_mixed_func\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_mpi_remote_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_mpi_remote_func" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "69", + "tests": "24", + "tests_succ": "12", + "tests_skip": "12", + "tests_fail": "0", + "tests_error": "0", + "runs": "24", + "runs_succ": "12", + "runs_skip": "12", + "runs_fail": "0", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/pytest/junit.mpi.integration.annotations b/python/test/files/junit-xml/pytest/junit.mpi.integration.annotations index d1498f00..bd91c7c4 100644 --- a/python/test/files/junit-xml/pytest/junit.mpi.integration.annotations +++ b/python/test/files/junit-xml/pytest/junit.mpi.integration.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 3 tests pass in 15s', 'summary': - '3 tests\u2002\u2003\u20033 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '15s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 
'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '1 suites\u2003\u20030 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '3 tests\u2002\u2003\u20033 :white_check_mark:\u2003\u200315s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0K2ttAYGy9jCGLciGAWqIx3d/EL3Zt5yewwodEeetFUAnzE8MEYSQZ0NmHHzE9IX/vuwU' 'elSrHgxqL+xCTRFEITOXoMRfv20sxzN/+1i7PYxXlLuXXFwPAs4WcJxwk6KM9l3gAAAA=' diff --git a/python/test/files/junit-xml/pytest/junit.mpi.integration.results.json b/python/test/files/junit-xml/pytest/junit.mpi.integration.results.json new file mode 100644 index 00000000..7e102638 --- /dev/null +++ b/python/test/files/junit-xml/pytest/junit.mpi.integration.results.json @@ -0,0 +1,53 @@ +{ + "title": "All 3 tests pass in 15s", + "summary": "3 tests   3 :white_check_mark:  15s :stopwatch:\n1 suites  0 :zzz:\n1 files    0 :x:\n\nResults for commit commit s.\n", + "conclusion": "success", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 15, + "tests": 3, + "tests_succ": 3, + "tests_skip": 0, + "tests_fail": 0, + "tests_error": 0, + "runs": 3, + "runs_succ": 3, + "runs_skip": 0, + "runs_fail": 0, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 3 tests, see \"Raw output\" for the full list of tests.", + "title": "3 tests found", + "raw_details": "test.test_interactiverun.InteractiveRunTests ‑ 
test_failed_run\ntest.test_interactiverun.InteractiveRunTests ‑ test_happy_run\ntest.test_interactiverun.InteractiveRunTests ‑ test_happy_run_elastic" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "15", + "tests": "3", + "tests_succ": "3", + "tests_skip": "0", + "tests_fail": "0", + "tests_error": "0", + "runs": "3", + "runs_succ": "3", + "runs_skip": "0", + "runs_fail": "0", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/pytest/junit.mpi.standalone.annotations b/python/test/files/junit-xml/pytest/junit.mpi.standalone.annotations index e243980e..e217dc05 100644 --- a/python/test/files/junit-xml/pytest/junit.mpi.standalone.annotations +++ b/python/test/files/junit-xml/pytest/junit.mpi.standalone.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 96 tests pass, 1 skipped in 3m 39s', 'summary': - '97 tests\u2002\u2003\u200396 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '3m 39s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '\u205f\u20041 suites\u2003\u2003\u205f\u20041 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '97 tests\u2002\u2003\u200396 :white_check_mark:\u2003\u20033m 39s ' + ':stopwatch:\n\u20071 suites\u2003\u2003\u20071 :zzz:\n\u20071 files\u2004\u2002' + '\u2003\u2003\u20070 
:x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLdRCo5cxBCFu/GAWqIx3d1FE7N7MS+bgGhZlec+qgnHrwSUYPQoHZiOsq44EXS6cXf' 'vCYL2UwTSfmWGPgUdoAQuJMgmFaDAa9Fsqhv0LPuLr3Zzlbs5r0qwrOIK4mJ0EPy/3HdY' diff --git a/python/test/files/junit-xml/pytest/junit.mpi.standalone.results.json b/python/test/files/junit-xml/pytest/junit.mpi.standalone.results.json new file mode 100644 index 00000000..bf739c7e --- /dev/null +++ b/python/test/files/junit-xml/pytest/junit.mpi.standalone.results.json @@ -0,0 +1,62 @@ +{ + "title": "All 96 tests pass, 1 skipped in 3m 39s", + "summary": "97 tests   96 :white_check_mark:  3m 39s :stopwatch:\n 1 suites   1 :zzz:\n 1 files     0 :x:\n\nResults for commit commit s.\n", + "conclusion": "success", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 219, + "tests": 97, + "tests_succ": 96, + "tests_skip": 1, + "tests_fail": 0, + "tests_error": 0, + "runs": 97, + "runs_succ": 96, + "runs_skip": 1, + "runs_fail": 0, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There is 1 skipped test, see \"Raw output\" for the name of the skipped test.", + "title": "1 skipped test found", + "raw_details": "test.test_spark.SparkTests ‑ test_get_available_devices" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 97 tests, see \"Raw output\" for the full list of tests.", + "title": "97 tests found", + "raw_details": "test.test_run.RunTests ‑ test_autotune_args\ntest.test_run.RunTests ‑ test_autotuning_with_fixed_param\ntest.test_run.RunTests ‑ test_config_file\ntest.test_run.RunTests ‑ test_config_file_override_args\ntest.test_run.RunTests ‑ test_generate_jsrun_rankfile\ntest.test_run.RunTests ‑ test_get_mpi_implementation\ntest.test_run.RunTests ‑ 
test_gloo_run_minimal\ntest.test_run.RunTests ‑ test_gloo_run_with_os_environ\ntest.test_run.RunTests ‑ test_hash\ntest.test_run.RunTests ‑ test_horovodrun_hostfile\ntest.test_run.RunTests ‑ test_host_hash\ntest.test_run.RunTests ‑ test_in_thread_args\ntest.test_run.RunTests ‑ test_js_run\ntest.test_run.RunTests ‑ test_library_args\ntest.test_run.RunTests ‑ test_logging_args\ntest.test_run.RunTests ‑ test_mpi_run_full\ntest.test_run.RunTests ‑ test_mpi_run_minimal\ntest.test_run.RunTests ‑ test_mpi_run_on_large_cluster\ntest.test_run.RunTests ‑ test_mpi_run_with_both_paths\ntest.test_run.RunTests ‑ test_mpi_run_with_both_pythonpaths\ntest.test_run.RunTests ‑ test_mpi_run_with_env_path\ntest.test_run.RunTests ‑ test_mpi_run_with_env_pythonpath\ntest.test_run.RunTests ‑ test_mpi_run_with_non_zero_exit\ntest.test_run.RunTests ‑ test_mpi_run_with_os_environ\ntest.test_run.RunTests ‑ test_mpi_run_with_sys_path\ntest.test_run.RunTests ‑ test_mpi_run_with_sys_pythonpath\ntest.test_run.RunTests ‑ test_mpi_run_without_path\ntest.test_run.RunTests ‑ test_mpi_run_without_pythonpath\ntest.test_run.RunTests ‑ test_on_event\ntest.test_run.RunTests ‑ test_params_args\ntest.test_run.RunTests ‑ test_run_controller\ntest.test_run.RunTests ‑ test_run_with_jsrun\ntest.test_run.RunTests ‑ test_safe_shell_exec_captures_last_line_wo_eol\ntest.test_run.RunTests ‑ test_safe_shell_exec_captures_stderr\ntest.test_run.RunTests ‑ test_safe_shell_exec_captures_stdout\ntest.test_run.RunTests ‑ test_safe_shell_exec_interrupts_on_event\ntest.test_run.RunTests ‑ test_safe_shell_exec_interrupts_on_parent_shutdown\ntest.test_run.RunTests ‑ test_safe_shell_exec_returns_exit_code\ntest.test_run.RunTests ‑ test_stall_check_args\ntest.test_run.RunTests ‑ test_timeline_args\ntest.test_run.RunTests ‑ test_validate_config_args\ntest.test_spark.SparkTests ‑ test_check_shape_compatibility\ntest.test_spark.SparkTests ‑ test_df_cache\ntest.test_spark.SparkTests ‑ 
test_driver_common_interfaces\ntest.test_spark.SparkTests ‑ test_driver_common_interfaces_fails\ntest.test_spark.SparkTests ‑ test_driver_common_interfaces_from_settings\ntest.test_spark.SparkTests ‑ test_driver_set_local_rank_to_index\ntest.test_spark.SparkTests ‑ test_get_available_devices\ntest.test_spark.SparkTests ‑ test_get_col_info\ntest.test_spark.SparkTests ‑ test_get_col_info_error_bad_shape\ntest.test_spark.SparkTests ‑ test_get_col_info_error_bad_size\ntest.test_spark.SparkTests ‑ test_get_metadata\ntest.test_spark.SparkTests ‑ test_gloo_exec_fn\ntest.test_spark.SparkTests ‑ test_gloo_exec_fn_provides_driver_with_local_rank\ntest.test_spark.SparkTests ‑ test_happy_run_elastic\ntest.test_spark.SparkTests ‑ test_happy_run_with_gloo\ntest.test_spark.SparkTests ‑ test_happy_run_with_mpi\ntest.test_spark.SparkTests ‑ test_hdfs_store_parse_url\ntest.test_spark.SparkTests ‑ test_host_hash\ntest.test_spark.SparkTests ‑ test_mpi_exec_fn_provides_driver_with_local_rank\ntest.test_spark.SparkTests ‑ test_mpirun_exec_fn\ntest.test_spark.SparkTests ‑ test_mpirun_not_found\ntest.test_spark.SparkTests ‑ test_prepare_data_compress_sparse\ntest.test_spark.SparkTests ‑ test_prepare_data_no_compression\ntest.test_spark.SparkTests ‑ test_rsh_event\ntest.test_spark.SparkTests ‑ test_rsh_events\ntest.test_spark.SparkTests ‑ test_rsh_with_non_zero_exit_code\ntest.test_spark.SparkTests ‑ test_rsh_with_zero_exit_code\ntest.test_spark.SparkTests ‑ test_spark_driver_host_discovery\ntest.test_spark.SparkTests ‑ test_spark_run_defaults_num_proc_to_spark_cores_with_gloo\ntest.test_spark.SparkTests ‑ test_spark_run_defaults_num_proc_to_spark_cores_with_mpi\ntest.test_spark.SparkTests ‑ test_spark_run_does_not_default_env_to_os_env_with_gloo\ntest.test_spark.SparkTests ‑ test_spark_run_does_not_default_env_to_os_env_with_mpi\ntest.test_spark.SparkTests ‑ test_spark_run_num_proc_precedes_spark_cores_with_gloo\ntest.test_spark.SparkTests ‑ 
test_spark_run_num_proc_precedes_spark_cores_with_mpi\ntest.test_spark.SparkTests ‑ test_spark_run_with_gloo\ntest.test_spark.SparkTests ‑ test_spark_run_with_mpi\ntest.test_spark.SparkTests ‑ test_spark_run_with_non_zero_exit_with_gloo\ntest.test_spark.SparkTests ‑ test_spark_run_with_non_zero_exit_with_mpi\ntest.test_spark.SparkTests ‑ test_spark_run_with_os_environ_with_mpi\ntest.test_spark.SparkTests ‑ test_spark_run_with_path_with_mpi\ntest.test_spark.SparkTests ‑ test_spark_task_service_abort_command\ntest.test_spark.SparkTests ‑ test_spark_task_service_abort_no_command\ntest.test_spark.SparkTests ‑ test_spark_task_service_env\ntest.test_spark.SparkTests ‑ test_spark_task_service_execute_command\ntest.test_spark.SparkTests ‑ test_sync_hdfs_store\ntest.test_spark.SparkTests ‑ test_task_fn_run_commands\ntest.test_spark.SparkTests ‑ test_task_fn_run_gloo_exec\ntest.test_spark.SparkTests ‑ test_task_service_check_for_command_start\ntest.test_spark.SparkTests ‑ test_task_service_wait_for_command_start_with_timeout\ntest.test_spark.SparkTests ‑ test_task_service_wait_for_command_start_without_timeout\ntest.test_spark.SparkTests ‑ test_timeout_with_gloo\ntest.test_spark.SparkTests ‑ test_timeout_with_mpi\ntest.test_spark.SparkTests ‑ test_to_list\ntest.test_spark.SparkTests ‑ test_train_val_split_col_boolean\ntest.test_spark.SparkTests ‑ test_train_val_split_col_integer\ntest.test_spark.SparkTests ‑ test_train_val_split_ratio" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "219", + "tests": "97", + "tests_succ": "96", + "tests_skip": "1", + "tests_fail": "0", + "tests_error": "0", + "runs": "97", + "runs_succ": "96", + "runs_skip": "1", + "runs_fail": "0", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/pytest/junit.mpi.static.annotations b/python/test/files/junit-xml/pytest/junit.mpi.static.annotations index 
687830fb..88026b3c 100644 --- a/python/test/files/junit-xml/pytest/junit.mpi.static.annotations +++ b/python/test/files/junit-xml/pytest/junit.mpi.static.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 24 tests pass in 2m 4s', 'summary': - '24 tests\u2002\u2003\u200324 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '2m 4s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '\u205f\u20041 suites\u2003\u2003\u205f\u20040 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '24 tests\u2002\u2003\u200324 :white_check_mark:\u2003\u20032m 4s ' + ':stopwatch:\n\u20071 suites\u2003\u2003\u20070 :zzz:\n\u20071 files\u2004\u2002' + '\u2003\u2003\u20070 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MOwqAMBAFr' 'xK2tlCx8jISYsTFT2STVOLd3ajkY/dmHswJE67aQi+aSoD16CKMnqRDswdsOxZ8uXAmGK' 'xX6mcWPNjUUUwS10JoIkOfIb/HYthF8BWp93CWezivKbNt6Bi+Jews4boBWo1x8eMAAAA' diff --git a/python/test/files/junit-xml/pytest/junit.mpi.static.results.json b/python/test/files/junit-xml/pytest/junit.mpi.static.results.json new file mode 100644 index 00000000..9ce6e6b8 --- /dev/null +++ b/python/test/files/junit-xml/pytest/junit.mpi.static.results.json @@ -0,0 +1,53 @@ +{ + "title": "All 24 tests pass in 2m 4s", + "summary": "24 tests   24 :white_check_mark:  2m 4s :stopwatch:\n 1 suites   0 :zzz:\n 1 files     0 :x:\n\nResults for commit 
commit s.\n", + "conclusion": "success", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 124, + "tests": 24, + "tests_succ": 24, + "tests_skip": 0, + "tests_fail": 0, + "tests_error": 0, + "runs": 24, + "runs_succ": 24, + "runs_skip": 0, + "runs_fail": 0, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 24 tests, see \"Raw output\" for the full list of tests.", + "title": "24 tests found", + "raw_details": "test.integration.test_static_run.StaticRunTests ‑ test_run_failure_gloo_local_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_failure_gloo_local_func\ntest.integration.test_static_run.StaticRunTests ‑ test_run_failure_gloo_mixed_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_failure_gloo_mixed_func\ntest.integration.test_static_run.StaticRunTests ‑ test_run_failure_gloo_remote_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_failure_gloo_remote_func\ntest.integration.test_static_run.StaticRunTests ‑ test_run_failure_mpi_local_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_failure_mpi_local_func\ntest.integration.test_static_run.StaticRunTests ‑ test_run_failure_mpi_mixed_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_failure_mpi_mixed_func\ntest.integration.test_static_run.StaticRunTests ‑ test_run_failure_mpi_remote_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_failure_mpi_remote_func\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_gloo_local_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_gloo_local_func\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_gloo_mixed_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_gloo_mixed_func\ntest.integration.test_static_run.StaticRunTests ‑ 
test_run_success_gloo_remote_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_gloo_remote_func\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_mpi_local_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_mpi_local_func\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_mpi_mixed_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_mpi_mixed_func\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_mpi_remote_cmd\ntest.integration.test_static_run.StaticRunTests ‑ test_run_success_mpi_remote_func" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "124", + "tests": "24", + "tests_succ": "24", + "tests_skip": "0", + "tests_fail": "0", + "tests_error": "0", + "runs": "24", + "runs_succ": "24", + "runs_skip": "0", + "runs_fail": "0", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/pytest/junit.spark.integration.1.annotations b/python/test/files/junit-xml/pytest/junit.spark.integration.1.annotations index 863ba84b..309d9557 100644 --- a/python/test/files/junit-xml/pytest/junit.spark.integration.1.annotations +++ b/python/test/files/junit-xml/pytest/junit.spark.integration.1.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 33 tests pass, 2 skipped in 2m 45s', 'summary': - '35 tests\u2002\u2003\u200333 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '2m 45s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '\u205f\u20041 suites\u2003\u2003\u205f\u20042 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 
'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '35 tests\u2002\u2003\u200333 :white_check_mark:\u2003\u20032m 45s ' + ':stopwatch:\n\u20071 suites\u2003\u2003\u20072 :zzz:\n\u20071 files\u2004\u2002' + '\u2003\u2003\u20070 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KoLfxECy9jCErcKGAWqIx3d1GzavdmXjK7NLBOQfaiKoQMCSLDmFBF8C5j15KgK+azYR' 'hC0jqb5jULbGRqFkbBSqJkMSF6fAwmx8W8f8FbvL2LP7mLvzXtrYVI8CwRZiWPEwEjqVj' diff --git a/python/test/files/junit-xml/pytest/junit.spark.integration.1.results.json b/python/test/files/junit-xml/pytest/junit.spark.integration.1.results.json new file mode 100644 index 00000000..da8b7e7a --- /dev/null +++ b/python/test/files/junit-xml/pytest/junit.spark.integration.1.results.json @@ -0,0 +1,62 @@ +{ + "title": "All 33 tests pass, 2 skipped in 2m 45s", + "summary": "35 tests   33 :white_check_mark:  2m 45s :stopwatch:\n 1 suites   2 :zzz:\n 1 files     0 :x:\n\nResults for commit commit s.\n", + "conclusion": "success", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 165, + "tests": 35, + "tests_succ": 33, + "tests_skip": 2, + "tests_fail": 0, + "tests_error": 0, + "runs": 35, + "runs_succ": 33, + "runs_skip": 2, + "runs_fail": 0, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 2 skipped tests, see \"Raw output\" for the full list of skipped tests.", + "title": "2 skipped tests found", + "raw_details": "test.test_spark_keras.SparkKerasTests ‑ test_session\ntest.test_spark_torch.SparkTorchTests ‑ test_happy_run_elastic_fault_tolerant_fails" + }, 
+ { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 35 tests, see \"Raw output\" for the full list of tests.", + "title": "35 tests found", + "raw_details": "test.test_spark_keras.SparkKerasTests ‑ test_batch_generator_fn\ntest.test_spark_keras.SparkKerasTests ‑ test_calculate_shuffle_buffer_size\ntest.test_spark_keras.SparkKerasTests ‑ test_calculate_shuffle_buffer_size_small_row_size\ntest.test_spark_keras.SparkKerasTests ‑ test_convert_custom_sparse_to_dense_bare_keras_fn\ntest.test_spark_keras.SparkKerasTests ‑ test_custom_sparse_to_dense_fn\ntest.test_spark_keras.SparkKerasTests ‑ test_fit_model\ntest.test_spark_keras.SparkKerasTests ‑ test_fit_model_multiclass\ntest.test_spark_keras.SparkKerasTests ‑ test_keras_direct_parquet_train\ntest.test_spark_keras.SparkKerasTests ‑ test_keras_model_checkpoint_callback\ntest.test_spark_keras.SparkKerasTests ‑ test_model_serialization\ntest.test_spark_keras.SparkKerasTests ‑ test_prep_data_tf_keras_fn_with_sparse_col\ntest.test_spark_keras.SparkKerasTests ‑ test_prep_data_tf_keras_fn_without_sparse_col\ntest.test_spark_keras.SparkKerasTests ‑ test_prepare_data_bare_keras_fn\ntest.test_spark_keras.SparkKerasTests ‑ test_reshape\ntest.test_spark_keras.SparkKerasTests ‑ test_restore_from_checkpoint\ntest.test_spark_keras.SparkKerasTests ‑ test_serialize_param_value\ntest.test_spark_keras.SparkKerasTests ‑ test_session\ntest.test_spark_torch.SparkTorchTests ‑ test_calculate_loss_with_sample_weight\ntest.test_spark_torch.SparkTorchTests ‑ test_calculate_loss_without_sample_weight\ntest.test_spark_torch.SparkTorchTests ‑ test_calculate_shuffle_buffer_size\ntest.test_spark_torch.SparkTorchTests ‑ test_calculate_shuffle_buffer_size_small_row_size\ntest.test_spark_torch.SparkTorchTests ‑ test_construct_metric_value_holders_one_metric_for_all_labels\ntest.test_spark_torch.SparkTorchTests ‑ test_fit_model\ntest.test_spark_torch.SparkTorchTests ‑ 
test_get_metric_avgs\ntest.test_spark_torch.SparkTorchTests ‑ test_happy_run_elastic\ntest.test_spark_torch.SparkTorchTests ‑ test_happy_run_elastic_fault_tolerant\ntest.test_spark_torch.SparkTorchTests ‑ test_happy_run_elastic_fault_tolerant_fails\ntest.test_spark_torch.SparkTorchTests ‑ test_metric_class\ntest.test_spark_torch.SparkTorchTests ‑ test_prepare_np_data\ntest.test_spark_torch.SparkTorchTests ‑ test_pytorch_get_optimizer_with_unscaled_lr\ntest.test_spark_torch.SparkTorchTests ‑ test_restore_from_checkpoint\ntest.test_spark_torch.SparkTorchTests ‑ test_torch_direct_parquet_train\ntest.test_spark_torch.SparkTorchTests ‑ test_torch_param_serialize\ntest.test_spark_torch.SparkTorchTests ‑ test_transform_multi_class\ntest.test_spark_torch.SparkTorchTests ‑ test_update_metrics" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "165", + "tests": "35", + "tests_succ": "33", + "tests_skip": "2", + "tests_fail": "0", + "tests_error": "0", + "runs": "35", + "runs_succ": "33", + "runs_skip": "2", + "runs_fail": "0", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/pytest/junit.spark.integration.2.annotations b/python/test/files/junit-xml/pytest/junit.spark.integration.2.annotations index cc90146a..8f977394 100644 --- a/python/test/files/junit-xml/pytest/junit.spark.integration.2.annotations +++ b/python/test/files/junit-xml/pytest/junit.spark.integration.2.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 33 tests pass, 2 skipped in 2m 52s', 'summary': - '35 tests\u2002\u2003\u200333 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '2m 52s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols 
"duration of all tests")\n' - '\u205f\u20041 suites\u2003\u2003\u205f\u20042 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '35 tests\u2002\u2003\u200333 :white_check_mark:\u2003\u20032m 52s ' + ':stopwatch:\n\u20071 suites\u2003\u2003\u20072 :zzz:\n\u20071 files\u2004\u2002' + '\u2003\u2003\u20070 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMSwqAMAwFr' '1K6duEHEbxMKVUx+Kmk7Uq8u6mfVHeZeWF2OcDcO9mKIhPSBfAMXUDtwa4Rm5IETT6OVf' '2CcsGYaKpkJtjI8L8aNMwkchY9osXHYFi5GO9f8Bapd/End/G3ZuyygCd4LuFGLY8TfGY' diff --git a/python/test/files/junit-xml/pytest/junit.spark.integration.2.results.json b/python/test/files/junit-xml/pytest/junit.spark.integration.2.results.json new file mode 100644 index 00000000..d9c7ee99 --- /dev/null +++ b/python/test/files/junit-xml/pytest/junit.spark.integration.2.results.json @@ -0,0 +1,62 @@ +{ + "title": "All 33 tests pass, 2 skipped in 2m 52s", + "summary": "35 tests   33 :white_check_mark:  2m 52s :stopwatch:\n 1 suites   2 :zzz:\n 1 files     0 :x:\n\nResults for commit commit s.\n", + "conclusion": "success", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 172, + "tests": 35, + "tests_succ": 33, + "tests_skip": 2, + "tests_fail": 0, + "tests_error": 0, + "runs": 35, + "runs_succ": 33, + "runs_skip": 2, + "runs_fail": 0, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 2 skipped tests, see \"Raw output\" for the full list of skipped tests.", + "title": "2 skipped 
tests found", + "raw_details": "test.test_spark_keras.SparkKerasTests ‑ test_session\ntest.test_spark_torch.SparkTorchTests ‑ test_happy_run_elastic_fault_tolerant_fails" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 35 tests, see \"Raw output\" for the full list of tests.", + "title": "35 tests found", + "raw_details": "test.test_spark_keras.SparkKerasTests ‑ test_batch_generator_fn\ntest.test_spark_keras.SparkKerasTests ‑ test_calculate_shuffle_buffer_size\ntest.test_spark_keras.SparkKerasTests ‑ test_calculate_shuffle_buffer_size_small_row_size\ntest.test_spark_keras.SparkKerasTests ‑ test_convert_custom_sparse_to_dense_bare_keras_fn\ntest.test_spark_keras.SparkKerasTests ‑ test_custom_sparse_to_dense_fn\ntest.test_spark_keras.SparkKerasTests ‑ test_fit_model\ntest.test_spark_keras.SparkKerasTests ‑ test_fit_model_multiclass\ntest.test_spark_keras.SparkKerasTests ‑ test_keras_direct_parquet_train\ntest.test_spark_keras.SparkKerasTests ‑ test_keras_model_checkpoint_callback\ntest.test_spark_keras.SparkKerasTests ‑ test_model_serialization\ntest.test_spark_keras.SparkKerasTests ‑ test_prep_data_tf_keras_fn_with_sparse_col\ntest.test_spark_keras.SparkKerasTests ‑ test_prep_data_tf_keras_fn_without_sparse_col\ntest.test_spark_keras.SparkKerasTests ‑ test_prepare_data_bare_keras_fn\ntest.test_spark_keras.SparkKerasTests ‑ test_reshape\ntest.test_spark_keras.SparkKerasTests ‑ test_restore_from_checkpoint\ntest.test_spark_keras.SparkKerasTests ‑ test_serialize_param_value\ntest.test_spark_keras.SparkKerasTests ‑ test_session\ntest.test_spark_torch.SparkTorchTests ‑ test_calculate_loss_with_sample_weight\ntest.test_spark_torch.SparkTorchTests ‑ test_calculate_loss_without_sample_weight\ntest.test_spark_torch.SparkTorchTests ‑ test_calculate_shuffle_buffer_size\ntest.test_spark_torch.SparkTorchTests ‑ test_calculate_shuffle_buffer_size_small_row_size\ntest.test_spark_torch.SparkTorchTests ‑ 
test_construct_metric_value_holders_one_metric_for_all_labels\ntest.test_spark_torch.SparkTorchTests ‑ test_fit_model\ntest.test_spark_torch.SparkTorchTests ‑ test_get_metric_avgs\ntest.test_spark_torch.SparkTorchTests ‑ test_happy_run_elastic\ntest.test_spark_torch.SparkTorchTests ‑ test_happy_run_elastic_fault_tolerant\ntest.test_spark_torch.SparkTorchTests ‑ test_happy_run_elastic_fault_tolerant_fails\ntest.test_spark_torch.SparkTorchTests ‑ test_metric_class\ntest.test_spark_torch.SparkTorchTests ‑ test_prepare_np_data\ntest.test_spark_torch.SparkTorchTests ‑ test_pytorch_get_optimizer_with_unscaled_lr\ntest.test_spark_torch.SparkTorchTests ‑ test_restore_from_checkpoint\ntest.test_spark_torch.SparkTorchTests ‑ test_torch_direct_parquet_train\ntest.test_spark_torch.SparkTorchTests ‑ test_torch_param_serialize\ntest.test_spark_torch.SparkTorchTests ‑ test_transform_multi_class\ntest.test_spark_torch.SparkTorchTests ‑ test_update_metrics" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "172", + "tests": "35", + "tests_succ": "33", + "tests_skip": "2", + "tests_fail": "0", + "tests_error": "0", + "runs": "35", + "runs_succ": "33", + "runs_skip": "2", + "runs_fail": "0", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.annotations b/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.annotations index 59ed37ac..78adb578 100644 --- a/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.annotations +++ b/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 5 tests pass in 2s', 'summary': - '5 tests\u2002\u2003\u20035 ' - 
'[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '2s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '1 suites\u2003\u20030 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '5 tests\u2002\u2003\u20035 :white_check_mark:\u2003\u20032s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMwQqAIBAFf' '0U8d6igSz8TYkZLlrHqKfr3VlOz25t5MBdfQCvLR9Y1jFsPrsDsUTgwB2FPSIcL15D3ZL' '2Uf7HBSaItYhGgf0IhGkwG/ZF7Yda5l79a5CoWuW5Js+/gCNJidhX8fgDdy7133QAAAA=' diff --git a/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.results.json b/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.results.json new file mode 100644 index 00000000..a31a32e9 --- /dev/null +++ b/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.results.json @@ -0,0 +1,53 @@ +{ + "title": "All 5 tests pass in 2s", + "summary": "5 tests   5 :white_check_mark:  2s :stopwatch:\n1 suites  0 :zzz:\n1 files    0 :x:\n\nResults for commit commit s.\n", + "conclusion": "success", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 2, + "tests": 5, + "tests_succ": 5, + "tests_skip": 0, + "tests_fail": 0, + "tests_error": 0, + "runs": 5, + "runs_succ": 5, + "runs_skip": 0, + "runs_fail": 
0, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 5 tests, see \"Raw output\" for the full list of tests.", + "title": "5 tests found", + "raw_details": "uk.co.gresearch.spark.diff.DiffOptionsSuite ‑ diff options diff value\nuk.co.gresearch.spark.diff.DiffOptionsSuite ‑ diff options left and right prefixes\nuk.co.gresearch.spark.diff.DiffOptionsSuite ‑ diff options with change column name same as diff column\nuk.co.gresearch.spark.diff.DiffOptionsSuite ‑ diff options with empty diff column name\nuk.co.gresearch.spark.diff.DiffOptionsSuite ‑ fluent methods of diff options" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "2", + "tests": "5", + "tests_succ": "5", + "tests_skip": "0", + "tests_fail": "0", + "tests_error": "0", + "runs": "5", + "runs_succ": "5", + "runs_skip": "0", + "runs_fail": "0", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/testsuite-in-testsuite.annotations b/python/test/files/junit-xml/testsuite-in-testsuite.annotations index 7c5453ea..e96e0ecc 100644 --- a/python/test/files/junit-xml/testsuite-in-testsuite.annotations +++ b/python/test/files/junit-xml/testsuite-in-testsuite.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 5 tests pass in 4s', 'summary': - '5 tests\u2002\u2003\u20035 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '4s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '4 suites\u2003\u20030 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 
'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '5 tests\u2002\u2003\u20035 :white_check_mark:\u2003\u20034s ' + ':stopwatch:\n4 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMywqAIBQFf' '0VctyioTT8TYkqXfMRVV9G/Z6aluzNzYE4qQQlHZzJ0hLoAPsEYYQ3IPFiTMR7+uaayFx' 'c4b8UORxT9JyQD1QiBaDEbDKb0nlnnXv5riatY4rrFrdbgI+RF3MbodQOdcxe63QAAAA=' diff --git a/python/test/files/junit-xml/testsuite-in-testsuite.results.json b/python/test/files/junit-xml/testsuite-in-testsuite.results.json new file mode 100644 index 00000000..d7994ddf --- /dev/null +++ b/python/test/files/junit-xml/testsuite-in-testsuite.results.json @@ -0,0 +1,53 @@ +{ + "title": "All 5 tests pass in 4s", + "summary": "5 tests   5 :white_check_mark:  4s :stopwatch:\n4 suites  0 :zzz:\n1 files    0 :x:\n\nResults for commit commit s.\n", + "conclusion": "success", + "stats": { + "files": 1, + "errors": [], + "suites": 4, + "duration": 4, + "tests": 5, + "tests_succ": 5, + "tests_skip": 0, + "tests_fail": 0, + "tests_error": 0, + "runs": 5, + "runs_succ": 5, + "runs_skip": 0, + "runs_fail": 0, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 5 tests, see \"Raw output\" for the full list of tests.", + "title": "5 tests found", + "raw_details": "someName ‑ TestCase1\nsomeName ‑ TestCase2\nsomeName ‑ TestCase3\nsomeName ‑ TestCase4\nsomeName ‑ TestCase5" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "4", + "duration": "4", + "tests": "5", + "tests_succ": "5", + 
"tests_skip": "0", + "tests_fail": "0", + "tests_error": "0", + "runs": "5", + "runs_succ": "5", + "runs_skip": "0", + "runs_fail": "0", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/testsuite-root.annotations b/python/test/files/junit-xml/testsuite-root.annotations index 7c5453ea..e96e0ecc 100644 --- a/python/test/files/junit-xml/testsuite-root.annotations +++ b/python/test/files/junit-xml/testsuite-root.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 5 tests pass in 4s', 'summary': - '5 tests\u2002\u2003\u20035 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '4s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '4 suites\u2003\u20030 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '5 tests\u2002\u2003\u20035 :white_check_mark:\u2003\u20034s ' + ':stopwatch:\n4 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMywqAIBQFf' '0VctyioTT8TYkqXfMRVV9G/Z6aluzNzYE4qQQlHZzJ0hLoAPsEYYQ3IPFiTMR7+uaayFx' 'c4b8UORxT9JyQD1QiBaDEbDKb0nlnnXv5riatY4rrFrdbgI+RF3MbodQOdcxe63QAAAA=' diff --git a/python/test/files/junit-xml/testsuite-root.results.json b/python/test/files/junit-xml/testsuite-root.results.json new file mode 100644 index 00000000..d7994ddf --- /dev/null +++ 
b/python/test/files/junit-xml/testsuite-root.results.json @@ -0,0 +1,53 @@ +{ + "title": "All 5 tests pass in 4s", + "summary": "5 tests   5 :white_check_mark:  4s :stopwatch:\n4 suites  0 :zzz:\n1 files    0 :x:\n\nResults for commit commit s.\n", + "conclusion": "success", + "stats": { + "files": 1, + "errors": [], + "suites": 4, + "duration": 4, + "tests": 5, + "tests_succ": 5, + "tests_skip": 0, + "tests_fail": 0, + "tests_error": 0, + "runs": 5, + "runs_succ": 5, + "runs_skip": 0, + "runs_fail": 0, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 5 tests, see \"Raw output\" for the full list of tests.", + "title": "5 tests found", + "raw_details": "someName ‑ TestCase1\nsomeName ‑ TestCase2\nsomeName ‑ TestCase3\nsomeName ‑ TestCase4\nsomeName ‑ TestCase5" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "4", + "duration": "4", + "tests": "5", + "tests_succ": "5", + "tests_skip": "0", + "tests_fail": "0", + "tests_error": "0", + "runs": "5", + "runs_succ": "5", + "runs_skip": "0", + "runs_fail": "0", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/tst/disabled.annotations b/python/test/files/junit-xml/tst/disabled.annotations index 3f1f6bac..6bc432e1 100644 --- a/python/test/files/junit-xml/tst/disabled.annotations +++ b/python/test/files/junit-xml/tst/disabled.annotations @@ -7,33 +7,10 @@ 'output': { 'title': '1 errors, 19 fail, 5 skipped, 6 pass in 0s', 'summary': - '\u205f\u20041 files\u2004\u2003\u205f\u20042 suites\u2004\u2003\u2002' - '0s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '31 tests\u2003\u205f\u20046 ' - 
'[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '5 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '19 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\u20031 ' - '[:fire:](https://github.com/step-security/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "test errors")\n31 runs\u2006\u2003' - '11 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '19 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\u20031 ' - '[:fire:](https://github.com/step-security/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '\u20071 files\u2004\u2003\u20072 suites\u2004\u2003\u20020s ' + ':stopwatch:\n31 tests\u2003\u20076 :white_check_mark:\u20035 :zzz:\u2003' + '19 :x:\u20031 :fire:\n31 runs\u200a\u200311 :white_check_mark:\u2003' + '0 :zzz:\u200319 :x:\u20031 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02NQQqAIBBFr' 'yKuW2RRUJcJsaIhyxh1Fd29sdLczXsf3px8Bj1Z3jNRMG49uAcqgtGjdGB2wpKQBhemWk' 'QYrFeKTPuLFQ4STRKzBB3aXTITosHvHfo9FcMdg+IXb7CMnPcekeeU2TZwBN/F7CL5dQP' diff --git a/python/test/files/junit-xml/tst/disabled.results.json 
b/python/test/files/junit-xml/tst/disabled.results.json new file mode 100644 index 00000000..dbefe529 --- /dev/null +++ b/python/test/files/junit-xml/tst/disabled.results.json @@ -0,0 +1,242 @@ +{ + "title": "1 errors, 19 fail, 5 skipped, 6 pass in 0s", + "summary": " 1 files   2 suites   0s :stopwatch:\n31 tests  6 :white_check_mark: 5 :zzz: 19 :x: 1 :fire:\n31 runs  11 :white_check_mark: 0 :zzz: 19 :x: 1 :fire:\n\nResults for commit commit s.\n", + "conclusion": "failure", + "stats": { + "files": 1, + "errors": [], + "suites": 2, + "duration": 0, + "tests": 31, + "tests_succ": 6, + "tests_skip": 5, + "tests_fail": 19, + "tests_error": 1, + "runs": 31, + "runs_succ": 11, + "runs_skip": 0, + "runs_fail": 19, + "runs_error": 1, + "commit": "commit sha" + }, + "annotations": [ + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "tst/disabled.xml [took 0s]", + "title": "factorial_of_value_from_fixture failed", + "raw_details": "/home/ivan/prj/tst/tests/failed/main.cpp:72: error: check_eq(3628800, 3628801)" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "tst/disabled.xml [took 0s]", + "title": "positive_arguments_must_produce_expected_result failed", + "raw_details": "/home/ivan/prj/tst/tests/failed/main.cpp:45: error: check_ne(6, 6)hello world!" 
+ }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "failure", + "message": "tst/disabled.xml [took 0s]", + "title": "test_which_throws_unknown_exception with error", + "raw_details": "uncaught (anonymous namespace)::some_unknown_exception" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "tst/disabled.xml [took 0s]", + "title": "positive_arguments_must_produce_expected_result[2] failed", + "raw_details": "/home/ivan/prj/tst/tests/failed/main.cpp:85: error: check(false)" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "tst/disabled.xml [took 0s]", + "title": "factorial_of_value_from_fixture[0] failed", + "raw_details": "/home/ivan/prj/tst/tests/failed/main.cpp:109: error: expected 2" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "tst/disabled.xml [took 0s]", + "title": "test_which_fails_check_eq_with_custom_message failed", + "raw_details": "/home/ivan/prj/tst/tests/failed/main.cpp:62: error: check_eq(6, 7)hello world!" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "tst/disabled.xml [took 0s]", + "title": "check_ge_print failed", + "raw_details": "/home/ivan/prj/tst/tests/failed/checks.cpp:59: error: check_ge(2, 3)failed!" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "tst/disabled.xml [took 0s]", + "title": "check_ge failed", + "raw_details": "/home/ivan/prj/tst/tests/failed/checks.cpp:55: error: check_ge(2, 3)Hello world!" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "tst/disabled.xml [took 0s]", + "title": "check_gt_print failed", + "raw_details": "/home/ivan/prj/tst/tests/failed/checks.cpp:43: error: check_gt(2, 2)failed!" 
+ }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "tst/disabled.xml [took 0s]", + "title": "check_lt_print failed", + "raw_details": "/home/ivan/prj/tst/tests/failed/checks.cpp:35: error: check_lt(2, 2)failed!" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "tst/disabled.xml [took 0s]", + "title": "check_print failed", + "raw_details": "/home/ivan/prj/tst/tests/failed/checks.cpp:11: error: failed!" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "tst/disabled.xml [took 0s]", + "title": "check_gt failed", + "raw_details": "/home/ivan/prj/tst/tests/failed/checks.cpp:39: error: check_gt(2, 2)Hello world!" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "tst/disabled.xml [took 0s]", + "title": "check failed", + "raw_details": "/home/ivan/prj/tst/tests/failed/checks.cpp:7: error: Hello world!" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "tst/disabled.xml [took 0s]", + "title": "check_le_print failed", + "raw_details": "/home/ivan/prj/tst/tests/failed/checks.cpp:51: error: check_le(2, 1)failed!" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "tst/disabled.xml [took 0s]", + "title": "check_eq failed", + "raw_details": "/home/ivan/prj/tst/tests/failed/checks.cpp:15: error: check_eq(1, 2)Hello world!" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "tst/disabled.xml [took 0s]", + "title": "check_eq_print failed", + "raw_details": "/home/ivan/prj/tst/tests/failed/checks.cpp:19: error: check_eq(1, 2)failed!" 
+ }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "tst/disabled.xml [took 0s]", + "title": "check_le failed", + "raw_details": "/home/ivan/prj/tst/tests/failed/checks.cpp:47: error: check_le(2, 1)Hello world!" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "tst/disabled.xml [took 0s]", + "title": "check_ne failed", + "raw_details": "/home/ivan/prj/tst/tests/failed/checks.cpp:23: error: check_ne(2, 2)Hello world!" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "tst/disabled.xml [took 0s]", + "title": "check_lt failed", + "raw_details": "/home/ivan/prj/tst/tests/failed/checks.cpp:31: error: check_lt(2, 2)Hello world!" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "tst/disabled.xml [took 0s]", + "title": "check_ne_print failed", + "raw_details": "/home/ivan/prj/tst/tests/failed/checks.cpp:27: error: check_ne(2, 2)failed!" 
+ }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 5 skipped tests, see \"Raw output\" for the full list of skipped tests.", + "title": "5 skipped tests found", + "raw_details": "disabled_param_test[0]\ndisabled_param_test[1]\ndisabled_param_test[2]\ndisabled_param_test[3]\ndisabled_test" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 31 tests, see \"Raw output\" for the full list of tests.", + "title": "31 tests found", + "raw_details": "check\ncheck_eq\ncheck_eq_print\ncheck_ge\ncheck_ge_print\ncheck_gt\ncheck_gt_print\ncheck_le\ncheck_le_print\ncheck_lt\ncheck_lt_print\ncheck_ne\ncheck_ne_print\ncheck_print\ndisabled_param_test[0]\ndisabled_param_test[1]\ndisabled_param_test[2]\ndisabled_param_test[3]\ndisabled_test\nfactorial_of_value_from_fixture\nfactorial_of_value_from_fixture[0]\nfactorial_of_value_from_fixture[1]\nfactorial_of_value_from_fixture[2]\nfactorial_of_value_from_fixture[3]\npositive_arguments_must_produce_expected_result\npositive_arguments_must_produce_expected_result[0]\npositive_arguments_must_produce_expected_result[1]\npositive_arguments_must_produce_expected_result[2]\npositive_arguments_must_produce_expected_result[3]\ntest_which_fails_check_eq_with_custom_message\ntest_which_throws_unknown_exception" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "2", + "duration": "0", + "tests": "31", + "tests_succ": "6", + "tests_skip": "5", + "tests_fail": "19", + "tests_error": "1", + "runs": "31", + "runs_succ": "11", + "runs_skip": "0", + "runs_fail": "19", + "runs_error": "1", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/unsupported-unicode.annotations b/python/test/files/junit-xml/unsupported-unicode.annotations index fa0a557e..5953a144 100644 --- 
a/python/test/files/junit-xml/unsupported-unicode.annotations +++ b/python/test/files/junit-xml/unsupported-unicode.annotations @@ -7,21 +7,9 @@ 'output': { 'title': '2 errors, 2 fail, 2 skipped, 1 pass in 8s', 'summary': - '7 tests\u2002\u2003\u20031 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '8s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '1 suites\u2003\u20032 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20032 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\u2003\u20032 ' - '[:fire:](https://github.com/step-security/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '7 tests\u2002\u2003\u20031 :white_check_mark:\u2003\u20038s ' + ':stopwatch:\n1 suites\u2003\u20032 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '2 :x:\u2003\u20032 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIAxFr' '0KYXXTReBlDUGIjgikwGe9uQVDc/ntt3skV6MXxkbUN4y6Af2EOKDxYQzgQ0sHHU1/25I' 'KU+TeLDQ4S3SuUAP0TC6LFbDCY0ouzzj381RJXscR1S9p9B0+QF3Or4NcNSlhwMN0AAAA' diff --git a/python/test/files/junit-xml/unsupported-unicode.results.json b/python/test/files/junit-xml/unsupported-unicode.results.json new file mode 100644 index 00000000..e9316fc2 --- /dev/null +++ b/python/test/files/junit-xml/unsupported-unicode.results.json @@ -0,0 +1,98 @@ +{ + "title": "2 errors, 2 fail, 2 skipped, 1 pass in 8s", + "summary": "7 tests   1 :white_check_mark:  8s 
:stopwatch:\n1 suites  2 :zzz:\n1 files    2 :x:  2 :fire:\n\nResults for commit commit s.\n", + "conclusion": "failure", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 8, + "tests": 7, + "tests_succ": 1, + "tests_skip": 2, + "tests_fail": 2, + "tests_error": 2, + "runs": 7, + "runs_succ": 1, + "runs_skip": 2, + "runs_fail": 2, + "runs_error": 2, + "commit": "commit sha" + }, + "annotations": [ + { + "path": "test/test-4.py", + "start_line": 4, + "end_line": 4, + "annotation_level": "warning", + "message": "unsupported-unicode.xml [took 1s]", + "title": "test 4 failed", + "raw_details": "Some unsupported unicode characters: 헴䜝헱홐㣇㿷䔭𒍺𡓿𠄉㦓\nfailed" + }, + { + "path": "test/test-5.py", + "start_line": 5, + "end_line": 5, + "annotation_level": "warning", + "message": "unsupported-unicode.xml [took 1s]", + "title": "test 5 failed", + "raw_details": "message\nSome unsupported unicode characters: 헴䜝헱홐㣇㿷䔭𒍺𡓿𠄉㦓" + }, + { + "path": "test/test-6.py", + "start_line": 6, + "end_line": 6, + "annotation_level": "failure", + "message": "unsupported-unicode.xml [took 1s]", + "title": "test 6 with error", + "raw_details": "Some unsupported unicode characters: 헴䜝헱홐㣇㿷䔭𒍺𡓿𠄉㦓\nerror" + }, + { + "path": "test/test-7.py", + "start_line": 7, + "end_line": 7, + "annotation_level": "failure", + "message": "unsupported-unicode.xml [took 1s]", + "title": "test 7 with error", + "raw_details": "message\nSome unsupported unicode characters: 헴䜝헱홐㣇㿷䔭𒍺𡓿𠄉㦓" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 2 skipped tests, see \"Raw output\" for the full list of skipped tests.", + "title": "2 skipped tests found", + "raw_details": "test 2\ntest 3" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 7 tests, see \"Raw output\" for the full list of tests.", + "title": "7 tests found", + "raw_details": "test 1 
헴䜝헱홐㣇㿷䔭\\U0001237a\\U000214ff\\U00020109㦓\ntest 2\ntest 3\ntest 4\ntest 5\ntest 6\ntest 7" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "8", + "tests": "7", + "tests_succ": "1", + "tests_skip": "2", + "tests_fail": "2", + "tests_error": "2", + "runs": "7", + "runs_succ": "1", + "runs_skip": "2", + "runs_fail": "2", + "runs_error": "2", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/with-xml-entities.annotations b/python/test/files/junit-xml/with-xml-entities.annotations index b8a65779..bc1a93f4 100644 --- a/python/test/files/junit-xml/with-xml-entities.annotations +++ b/python/test/files/junit-xml/with-xml-entities.annotations @@ -7,21 +7,9 @@ 'output': { 'title': '1 errors, 1 fail, 2 skipped in 0s', 'summary': - '4 tests\u2002\u2003\u20030 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '1 suites\u2003\u20032 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\u2003\u20031 ' - '[:fire:](https://github.com/step-security/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '4 tests\u2002\u2003\u20030 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20032 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\u2003\u20031 
:fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MQQ5AMBBFr' '9J0bYFYuYw0RUyUyky7Enc3qmV2/72fvFPP4CbSvWoqpSlC+GCMaAL4nbFm5CM8V1f2QN' 'FaeQ60wsGi/cRswOXaKyZEj9lg3EvvmTL38l9LLGKJZcv6bYPAkJeixejrBpBXIV3dAAA' diff --git a/python/test/files/junit-xml/with-xml-entities.results.json b/python/test/files/junit-xml/with-xml-entities.results.json new file mode 100644 index 00000000..382cfc01 --- /dev/null +++ b/python/test/files/junit-xml/with-xml-entities.results.json @@ -0,0 +1,80 @@ +{ + "title": "1 errors, 1 fail, 2 skipped in 0s", + "summary": "4 tests   0 :white_check_mark:  0s :stopwatch:\n1 suites  2 :zzz:\n1 files    1 :x:  1 :fire:\n\nResults for commit commit s.\n", + "conclusion": "failure", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 0, + "tests": 4, + "tests_succ": 0, + "tests_skip": 2, + "tests_fail": 1, + "tests_error": 1, + "runs": 4, + "runs_succ": 0, + "runs_skip": 2, + "runs_fail": 1, + "runs_error": 1, + "commit": "commit sha" + }, + "annotations": [ + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "with-xml-entities.xml [took 0s]", + "title": "Test with 'apostrophe' in the test name failed", + "raw_details": "A message with 'apostrophes'\nContent with 'apostrophes'" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "failure", + "message": "with-xml-entities.xml [took 0s]", + "title": "Test with & in the test name with error", + "raw_details": "A message with &\nContent with &" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 2 skipped tests, see \"Raw output\" for the full list of skipped tests.", + "title": "2 skipped tests found", + "raw_details": "Test with \"quotes\" in the test name\nTest with < and > in the test name" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": 
"notice", + "message": "There are 4 tests, see \"Raw output\" for the full list of tests.", + "title": "4 tests found", + "raw_details": "Test with \"quotes\" in the test name\nTest with & in the test name\nTest with 'apostrophe' in the test name\nTest with < and > in the test name" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "0", + "tests": "4", + "tests_succ": "0", + "tests_skip": "2", + "tests_fail": "1", + "tests_error": "1", + "runs": "4", + "runs_succ": "0", + "runs_skip": "2", + "runs_fail": "1", + "runs_error": "1", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/junit-xml/xunit/xunit.annotations b/python/test/files/junit-xml/xunit/xunit.annotations index 82a3ca7f..7aeb7110 100644 --- a/python/test/files/junit-xml/xunit/xunit.annotations +++ b/python/test/files/junit-xml/xunit/xunit.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 2 tests pass in 0s', 'summary': - '2 tests\u2002\u2003\u20032 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '1 suites\u2003\u20030 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '2 tests\u2002\u2003\u20032 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit 
s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr' '0KoLdTSyxCCEDfyMQtUxrsLCgrdvJnJO6kCLT1dyDQQ6iOED9aIPICzCceEaQh5mmtmPg' 'rRFzsc7ZspDrorJKLD0mC01Zdjq3v5tz3cyB5uXcIZAyFBScRvnF43yWbLod0AAAA=\n', diff --git a/python/test/files/junit-xml/xunit/xunit.results.json b/python/test/files/junit-xml/xunit/xunit.results.json new file mode 100644 index 00000000..2aef75df --- /dev/null +++ b/python/test/files/junit-xml/xunit/xunit.results.json @@ -0,0 +1,53 @@ +{ + "title": "All 2 tests pass in 0s", + "summary": "2 tests   2 :white_check_mark:  0s :stopwatch:\n1 suites  0 :zzz:\n1 files    0 :x:\n\nResults for commit commit s.\n", + "conclusion": "success", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 0, + "tests": 2, + "tests_succ": 2, + "tests_skip": 0, + "tests_fail": 0, + "tests_error": 0, + "runs": 2, + "runs_succ": 2, + "runs_skip": 0, + "runs_fail": 0, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 2 tests, see \"Raw output\" for the full list of tests.", + "title": "2 tests found", + "raw_details": "mytestapp.Tests.AttriubteTests.GetTestNoFeature\nmytestapp.Tests.AttriubteTests.SetTestNoFeature" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "0", + "tests": "2", + "tests_succ": "2", + "tests_skip": "0", + "tests_fail": "0", + "tests_error": "0", + "runs": "2", + "runs_succ": "2", + "runs_skip": "0", + "runs_fail": "0", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/mocha/tests.annotations b/python/test/files/mocha/tests.annotations index 4f473019..9d7ae538 100644 --- a/python/test/files/mocha/tests.annotations +++ b/python/test/files/mocha/tests.annotations @@ -7,21 +7,9 @@ 'output': { 'title': '1 errors, 1 fail, 1 skipped, 2 pass in 
12s', 'summary': - '5 tests\u2002\u2003\u20032 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '12s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '1 suites\u2003\u20031 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\u2003\u20031 ' - '[:fire:](https://github.com/step-security/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '5 tests\u2002\u2003\u20032 :white_check_mark:\u2003\u200312s ' + ':stopwatch:\n1 suites\u2003\u20031 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\u2003\u20031 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr' '0KobSSx8TKGoMaNfMwClfHu8hXs3sxm56Y7yM3SmYwDodaD+2D1yB0YHZEFDhcXb1Pdi/' 'VCBMGaOOEq31nsHORPbIgGi0Gvay/OPpe51RJ3scR9SxilwAUoi9iD0+cFI3viF94AAAA' diff --git a/python/test/files/mocha/tests.results.json b/python/test/files/mocha/tests.results.json new file mode 100644 index 00000000..bc7130e4 --- /dev/null +++ b/python/test/files/mocha/tests.results.json @@ -0,0 +1,80 @@ +{ + "title": "1 errors, 1 fail, 1 skipped, 2 pass in 12s", + "summary": "5 tests   2 :white_check_mark:  12s :stopwatch:\n1 suites  1 :zzz:\n1 files    1 :x:  1 :fire:\n\nResults for commit commit s.\n", + "conclusion": "failure", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 12, + "tests": 5, + "tests_succ": 2, + "tests_skip": 1, + 
"tests_fail": 1, + "tests_error": 1, + "runs": 5, + "runs_succ": 2, + "runs_skip": 1, + "runs_fail": 1, + "runs_error": 1, + "commit": "commit sha" + }, + "annotations": [ + { + "path": "/home/runner/work/mocha/mocha/test/unit/runner.spec.js", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "tests.json", + "title": "Runner instance method grep() should update the runner.total with number of matched tests failed", + "raw_details": "Required\nError: Required at Context. (test/unit/runner.spec.js:43:15) at callFn (lib/runnable.js:366:21) at Test.Runnable.run (lib/runnable.js:354:5) at Runner.runTest (lib/runner.js:666:10) at /home/runner/work/mocha/mocha/lib/runner.js:789:12 at next (lib/runner.js:581:14) at /home/runner/work/mocha/mocha/lib/runner.js:591:7 at next (lib/runner.js:474:14) at Immediate._onImmediate (lib/runner.js:559:5) at processImmediate (internal/timers.js:464:21)" + }, + { + "path": "/home/runner/work/mocha/mocha/test/unit/test.spec.js", + "start_line": 0, + "end_line": 0, + "annotation_level": "failure", + "message": "tests.json [took 4s]", + "title": "Test .clone() should copy the title with error", + "raw_details": "[31m[1mexpected[22m[39m [36m'To be cloned'[39m [31m[1mto be[22m[39m [36m'Not to be cloned'[39m[41m[30mTo[39m[49m[31m be cloned[39m[42m[30mNot[39m[49m[32m [39m[42m[30mto [39m[49m[32mbe cloned[39m\nUnexpectedError\n[31m[1mexpected[22m[39m [36m'To be cloned'[39m [31m[1mto be[22m[39m [36m'Not to be cloned'[39m[41m[30mTo[39m[49m[31m be cloned[39m[42m[30mNot[39m[49m[32m [39m[42m[30mto [39m[49m[32mbe cloned[39m\nUnexpectedError: [31m[1mexpected[22m[39m [36m'To be cloned'[39m [31m[1mto be[22m[39m [36m'Not to be cloned'[39m[41m[30mTo[39m[49m[31m be cloned[39m[42m[30mNot[39m[49m[32m [39m[42m[30mto [39m[49m[32mbe cloned[39m at Context. 
(test/unit/test.spec.js:26:7) at callFn (lib/runnable.js:366:21) at Test.Runnable.run (lib/runnable.js:354:5) at Runner.runTest (lib/runner.js:666:10) at /home/runner/work/mocha/mocha/lib/runner.js:789:12 at next (lib/runner.js:581:14) at /home/runner/work/mocha/mocha/lib/runner.js:591:7 at next (lib/runner.js:474:14) at cbHookRun (lib/runner.js:539:7) at done (lib/runnable.js:310:5) at callFn (lib/runnable.js:389:7) at Hook.Runnable.run (lib/runnable.js:354:5) at next (lib/runner.js:498:10) at Immediate._onImmediate (lib/runner.js:559:5) at processImmediate (internal/timers.js:464:21) set UNEXPECTED_FULL_TRACE=true to see the full stack trace" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There is 1 skipped test, see \"Raw output\" for the name of the skipped test.", + "title": "1 skipped test found", + "raw_details": "Mocha instance method run() should initialize the stats collector" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 5 tests, see \"Raw output\" for the full list of tests.", + "title": "5 tests found", + "raw_details": "Context Siblings sequestered sibling should work\nContext nested should work\nMocha instance method run() should initialize the stats collector\nRunner instance method grep() should update the runner.total with number of matched tests\nTest .clone() should copy the title" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "12", + "tests": "5", + "tests_succ": "2", + "tests_skip": "1", + "tests_fail": "1", + "tests_error": "1", + "runs": "5", + "runs_succ": "2", + "runs_skip": "1", + "runs_fail": "1", + "runs_error": "1", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/nunit/mstest/clicketyclackety.annotations b/python/test/files/nunit/mstest/clicketyclackety.annotations index 
17c31d8d..cf20a207 100644 --- a/python/test/files/nunit/mstest/clicketyclackety.annotations +++ b/python/test/files/nunit/mstest/clicketyclackety.annotations @@ -7,29 +7,10 @@ 'output': { 'title': '10 fail, 12 pass in 0s', 'summary': - '\u205f\u20041 files\u2004\u2003\u205f\u20048 suites\u2004\u2003\u2002' - '0s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '22 tests\u200312 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '10 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n23 runs\u2006\u2003' - '13 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '10 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '\u20071 files\u2004\u2003\u20078 suites\u2004\u2003\u20020s ' + ':stopwatch:\n22 tests\u200312 :white_check_mark:\u20030 :zzz:\u2003' + '10 :x:\n23 runs\u200a\u200313 :white_check_mark:\u20030 :zzz:\u2003' + '10 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MSw6AIAxEr' '0JYu/CzMV7GENTYCGJKWRnvbiEo7Ppmpu+WG5jVy0l0jZA+ACUYGZaAisCdjC0jFxSrvv' 
'9g9kHr+FklB1z1ft4UmDgpyYroMG8wnEk55Ps3lqAIE9e+FNQ67awFYsiX8LuSzwvzas/' diff --git a/python/test/files/nunit/mstest/clicketyclackety.results.json b/python/test/files/nunit/mstest/clicketyclackety.results.json new file mode 100644 index 00000000..98512040 --- /dev/null +++ b/python/test/files/nunit/mstest/clicketyclackety.results.json @@ -0,0 +1,143 @@ +{ + "title": "10 fail, 12 pass in 0s", + "summary": " 1 files   8 suites   0s :stopwatch:\n22 tests 12 :white_check_mark: 0 :zzz: 10 :x:\n23 runs  13 :white_check_mark: 0 :zzz: 10 :x:\n\nResults for commit commit s.\n", + "conclusion": "failure", + "stats": { + "files": 1, + "errors": [], + "suites": 8, + "duration": 0, + "tests": 22, + "tests_succ": 12, + "tests_skip": 0, + "tests_fail": 10, + "tests_error": 0, + "runs": 23, + "runs_succ": 13, + "runs_skip": 0, + "runs_fail": 10, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "mstest/clicketyclackety.xml [took 0s]", + "title": "BakeDrawings failed", + "raw_details": "System.InvalidOperationException : Assert.Equals should not be used. 
Use Assert.AreEqual instead.\n at NUnit.Framework.Assert.Equals(Object a, Object b)\n at MyProject.Tests.Real.UserInput.BakeDrawingCommandTests.BakeDrawings()" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "mstest/clicketyclackety.xml [took 0s]", + "title": "SilentRun failed", + "raw_details": "System.NullReferenceException : Object reference not set to an instance of an object.\n at MyProject.Tests.Real.UserInput.ProjectInitCommandTests.SilentRun()" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "mstest/clicketyclackety.xml [took 0s]", + "title": "DiscardDrawingTests failed", + "raw_details": "System.IO.DirectoryNotFoundException : Could not find a part of the path 'C:\\Users\\USER\\actions-runner\\_work\\MyProject\\MyProject\\SC\\f4a8fa46-245d-4cd5-88c1-80fcfbda6369'.\n at System.IO.__Error.WinIOError(Int32 errorCode, String maybeFullPath)\n at System.IO.FileSystemEnumerableIterator`1.CommonInit()\n at System.IO.FileSystemEnumerableIterator`1..ctor(String path, String originalUserPath, String searchPattern, SearchOption searchOption, SearchResultHandler`1 resultHandler, Boolean checkHost)\n at System.IO.Directory.GetFiles(String path)\n at MyProject.Tests.Real.FlagTests.DiscardDrawingTests()" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "mstest/clicketyclackety.xml [took 0s]", + "title": "LoadDrawingsEventFlagTests failed", + "raw_details": " Expected: 3\n But was: 0\n at MyProject.Tests.Real.FlagTests.LoadDrawingsEventFlagTests()" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "mstest/clicketyclackety.xml [took 0s]", + "title": "ResetProjectEventFlagTests failed", + "raw_details": "System.NullReferenceException : Object reference not set to an instance of an object.\n at 
MyProject.Tests.Real.FlagTests.ResetProjectEventFlagTests()" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "mstest/clicketyclackety.xml [took 0s]", + "title": "SetupLayersEventFlagTests failed", + "raw_details": "om.Exceptions.DocumentException : Document should be initlised, but isn't!\n at MyProject.Runtime.Events.SetupLayers.Execute()\n at MyProject.Tests.Real.FlagTests.SetupLayersEventFlagTests()" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "mstest/clicketyclackety.xml [took 0s]", + "title": "SetupPipeEventFlagTests failed", + "raw_details": "System.NullReferenceException : Object reference not set to an instance of an object.\n at MyProject.Tests.Real.FlagTests.SetupPipeEventFlagTests()" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "mstest/clicketyclackety.xml [took 0s]", + "title": "DrawingConstants failed", + "raw_details": "System.NullReferenceException : Object reference not set to an instance of an object.\n at MyProject.Tests.Real.RuntimeTests.DrawingConstants()" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "mstest/clicketyclackety.xml [took 0s]", + "title": "FileConstants failed", + "raw_details": "System.NullReferenceException : Object reference not set to an instance of an object.\n at MyProject.Tests.Real.RuntimeTests.FileConstants()" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "mstest/clicketyclackety.xml [took 0s]", + "title": "PluginConstants failed", + "raw_details": "System.MissingMethodException : Method not found: 'System.Object MyProject.MyProjectPlugIn.get_Instance()'.\n at MyProject.Tests.Real.RuntimeTests.PluginConstants()" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There 
are 22 tests, see \"Raw output\" for the full list of tests.", + "title": "22 tests found", + "raw_details": "BakeDrawings\nDeleteMyProjectObjectEventFlagTests\nDiscardDrawingTests\nDisplayGraphicConstants\nDrawingConstants\nEventRegisterTests\nFileConstants\nLoadDrawingsEventFlagTests\nLoadedDrawings\nModifyNewObjectUniqueIdEventFlagTests\nMoveControlPointEventFlagTests\nObjectConstants\nPluginConstants\nResetProjectEventFlagTests\nSetupLayersEventFlagTests\nSetupPipeEventFlagTests\nSilentRun\nTest\nUIPanelConstants\nUIPropertyConstants\nUpdateDrawingsPanelEventFlagTests\nUpdatePropertiesPanelEventFlagTests" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "8", + "duration": "0", + "tests": "22", + "tests_succ": "12", + "tests_skip": "0", + "tests_fail": "10", + "tests_error": "0", + "runs": "23", + "runs_succ": "13", + "runs_skip": "0", + "runs_fail": "10", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/nunit/mstest/pickles.annotations b/python/test/files/nunit/mstest/pickles.annotations index b3ee25bd..e05c2074 100644 --- a/python/test/files/nunit/mstest/pickles.annotations +++ b/python/test/files/nunit/mstest/pickles.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 3 pass in 0s', 'summary': - '4 tests\u2002\u2003\u20033 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '2 suites\u2003\u20030 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - 
'[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '4 tests\u2002\u2003\u20033 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n2 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBBEr' '0KoLfxVXoYQxLiRj1mgMt7dFSHSzZuZvItvYHTgCxs6xkOCmGEkWBPKCN4R9oQ0xHeaax' 'YhKUXF9BcHnO1bbBJMUX+FRvRYLphc9b2x1X382zI3ssytS3lrIRKUxMIu+f0AuKmg790' diff --git a/python/test/files/nunit/mstest/pickles.results.json b/python/test/files/nunit/mstest/pickles.results.json new file mode 100644 index 00000000..b57e41dd --- /dev/null +++ b/python/test/files/nunit/mstest/pickles.results.json @@ -0,0 +1,62 @@ +{ + "title": "1 fail, 3 pass in 0s", + "summary": "4 tests   3 :white_check_mark:  0s :stopwatch:\n2 suites  0 :zzz:\n1 files    1 :x:\n\nResults for commit commit s.\n", + "conclusion": "failure", + "stats": { + "files": 1, + "errors": [], + "suites": 2, + "duration": 0, + "tests": 4, + "tests_succ": 3, + "tests_skip": 0, + "tests_fail": 1, + "tests_error": 0, + "runs": 4, + "runs_succ": 3, + "runs_skip": 0, + "runs_fail": 1, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "mstest/pickles.xml [took 0s]", + "title": "Pickles.TestHarness.AdditionFeature.FailToAddTwoNumbers failed", + "raw_details": "\n at Pickles.TestHarness.xUnit.Steps.ThenTheResultShouldBePass(Int32 result) in C:\\dev\\pickles-results-harness\\Pickles.TestHarness\\Pickles.TestHarness.NUnit\\Steps.cs:line 26\nat lambda_method(Closure , IContextManager , Int32 )\nat TechTalk.SpecFlow.Bindings.MethodBinding.InvokeAction(IContextManager contextManager, Object[] arguments, ITestTracer testTracer, TimeSpan& duration)\nat 
TechTalk.SpecFlow.Bindings.StepDefinitionBinding.Invoke(IContextManager contextManager, ITestTracer testTracer, Object[] arguments, TimeSpan& duration)\nat TechTalk.SpecFlow.Infrastructure.TestExecutionEngine.ExecuteStepMatch(BindingMatch match, Object[] arguments)\nat TechTalk.SpecFlow.Infrastructure.TestExecutionEngine.ExecuteStep(StepArgs stepArgs)\nat TechTalk.SpecFlow.Infrastructure.TestExecutionEngine.OnAfterLastStep()\nat TechTalk.SpecFlow.TestRunner.CollectScenarioErrors()\nat Pickles.TestHarness.AdditionFeature.ScenarioCleanup() in C:\\dev\\pickles-results-harness\\Pickles.TestHarness\\Pickles.TestHarness.NUnit\\Addition.feature.cs:line 0\nat Pickles.TestHarness.AdditionFeature.FailToAddTwoNumbers() in c:\\dev\\pickles-results-harness\\Pickles.TestHarness\\Pickles.TestHarness.NUnit\\Addition.feature:line 18" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 4 tests, see \"Raw output\" for the full list of tests.", + "title": "4 tests found", + "raw_details": "Pickles.TestHarness.AdditionFeature.AddTwoNumbers\nPickles.TestHarness.AdditionFeature.AddingSeveralNumbers(\"40\",\"50\",\"90\",System.String[])\nPickles.TestHarness.AdditionFeature.AddingSeveralNumbers(\"60\",\"70\",\"130\",System.String[])\nPickles.TestHarness.AdditionFeature.FailToAddTwoNumbers" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "2", + "duration": "0", + "tests": "4", + "tests_succ": "3", + "tests_skip": "0", + "tests_fail": "1", + "tests_error": "0", + "runs": "4", + "runs_succ": "3", + "runs_skip": "0", + "runs_fail": "1", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/nunit/mstest/timewarpinc.annotations b/python/test/files/nunit/mstest/timewarpinc.annotations index 94bd8766..dec20d71 100644 --- a/python/test/files/nunit/mstest/timewarpinc.annotations +++ 
b/python/test/files/nunit/mstest/timewarpinc.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail in 2s', 'summary': - '1 tests\u2002\u2003\u20030 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '2s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '1 suites\u2003\u20030 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '1 tests\u2002\u2003\u20030 :white_check_mark:\u2003\u20032s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMQQ6AIAwEv' '0I4e1CPfsYQhNiIYAqcjH+3IgjednbbObkGozyf2NAx7iOED5aIIoCzhCMhDaFMKc8+Sk' 'lFX4sNjl+hBZjfi0J0mE8w2uJ7Yqt7udoSN7LErUu6fYdAkBPzq+DXDXGDl7HdAAAA\n', diff --git a/python/test/files/nunit/mstest/timewarpinc.results.json b/python/test/files/nunit/mstest/timewarpinc.results.json new file mode 100644 index 00000000..a71c2166 --- /dev/null +++ b/python/test/files/nunit/mstest/timewarpinc.results.json @@ -0,0 +1,62 @@ +{ + "title": "1 fail in 2s", + "summary": "1 tests   0 :white_check_mark:  2s :stopwatch:\n1 suites  0 :zzz:\n1 files    1 :x:\n\nResults for commit commit s.\n", + "conclusion": "failure", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 2, + "tests": 1, + "tests_succ": 0, + "tests_skip": 0, + "tests_fail": 1, + "tests_error": 0, + "runs": 1, + "runs_succ": 0, + "runs_skip": 0, 
+ "runs_fail": 1, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "mstest/timewarpinc.xml [took 2s]", + "title": "ValidateSceneContainer(\"Assets/Scenes/Grid/GridTest.unity\") failed", + "raw_details": "Zenject.ZenjectException : Zenject Validation Failed! See errors below for details.\n at Zenject.Internal.ZenUnityEditorUtil.ValidateCurrentSceneSetup () [0x0009c] in /github/workspace/Assets/ThirdParty/Zenject/Source/Editor/ZenUnityEditorUtil.cs:82\n at MP.Tests.AssetValidatorTest.ValidateSceneContainer (System.String scenePath) [0x00009] in /github/workspace/Assets/Tests/EditorMode/AssetValidatorTest.cs:58\n at (wrapper managed-to-native) System.Reflection.RuntimeMethodInfo.InternalInvoke(System.Reflection.RuntimeMethodInfo,object,object[],System.Exception&)\n at System.Reflection.RuntimeMethodInfo.Invoke (System.Object obj, System.Reflection.BindingFlags invokeAttr, System.Reflection.Binder binder, System.Object[] parameters, System.Globalization.CultureInfo culture) [0x0006a] in :0\nAssertionException: Could not find a tilemap tagged with LevelBounds.\nAssertion failure. 
Value was Null\nExpected: Value was not Null\nUnityEngine.Assertions.Assert.Fail (System.String message, System.String userMessage) (at /home/bokken/buildslave/unity/build/Runtime/Export/Assertions/Assert/AssertBase.cs:29)\nUnityEngine.Assertions.Assert.IsNotNull (UnityEngine.Object value, System.String message) (at /home/bokken/buildslave/unity/build/Runtime/Export/Assertions/Assert/AssertNull.cs:58)\nUnityEngine.Assertions.Assert.IsNotNull[T] (T value, System.String message) (at /home/bokken/buildslave/unity/build/Runtime/Export/Assertions/Assert/AssertNull.cs:46)\nMP.Gameplay.Level.LevelInstaller.InstallBindings () (at Assets/Scripts/Gameplay/Level/LevelInstaller.cs:30)\nZenject.CompositeMonoInstaller.InstallBindings () (at Assets/ThirdParty/Zenject/Source/Install/CompositeMonoInstaller.cs:25)\nZenject.Context.InstallInstallers (System.Collections.Generic.List`1[T] normalInstallers, System.Collections.Generic.List`1[T] normalInstallerTypes, System.Collections.Generic.List`1[T] scriptableObjectInstallers, System.Collections.Generic.List`1[T] installers, System.Collections.Generic.List`1[T] installerPrefabs) (at Assets/ThirdParty/Zenject/Source/Install/Contexts/Context.cs:218)\nZenject.Context.InstallInstallers () (at Assets/ThirdParty/Zenject/Source/Install/Contexts/Context.cs:139)\nZenject.SceneContext.InstallBindings (System.Collections.Generic.List`1[T] injectableMonoBehaviours) (at Assets/ThirdParty/Zenject/Source/Install/Contexts/SceneContext.cs:346)\nZenject.SceneContext.Install () (at Assets/ThirdParty/Zenject/Source/Install/Contexts/SceneContext.cs:265)\nZenject.SceneContext.Validate () (at Assets/ThirdParty/Zenject/Source/Install/Contexts/SceneContext.cs:121)\nZenject.Internal.ZenUnityEditorUtil.ValidateCurrentSceneSetup () (at Assets/ThirdParty/Zenject/Source/Editor/ZenUnityEditorUtil.cs:67)\nUnityEngine.Debug:LogException(Exception)\nModestTree.Log:ErrorException(Exception) (at 
Assets/ThirdParty/Zenject/Source/Internal/Log.cs:60)\nZenject.Internal.ZenUnityEditorUtil:ValidateCurrentSceneSetup() (at Assets/ThirdParty/Zenject/Source/Editor/ZenUnityEditorUtil.cs:72)\nMP.Tests.AssetValidatorTest:ValidateSceneContainer(String) (at Assets/Tests/EditorMode/AssetValidatorTest.cs:58)\nSystem.Reflection.MethodBase:Invoke(Object, Object[])\nNUnit.Framework.Internal.Reflect:InvokeMethod(MethodInfo, Object, Object[])\nNUnit.Framework.Internal.MethodWrapper:Invoke(Object, Object[])\nNUnit.Framework.Internal.Commands.TestMethodCommand:RunNonAsyncTestMethod(ITestExecutionContext)\nNUnit.Framework.Internal.Commands.TestMethodCommand:RunTestMethod(ITestExecutionContext)\nNUnit.Framework.Internal.Commands.TestMethodCommand:Execute(ITestExecutionContext)\nUnityEditor.EditorApplication:Internal_CallUpdateFunctions() (at /home/bokken/buildslave/unity/build/Editor/Mono/EditorApplication.cs:359)" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There is 1 test, see \"Raw output\" for the name of the test.", + "title": "1 test found", + "raw_details": "ValidateSceneContainer(\"Assets/Scenes/Grid/GridTest.unity\")" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "2", + "tests": "1", + "tests_succ": "0", + "tests_skip": "0", + "tests_fail": "1", + "tests_error": "0", + "runs": "1", + "runs_succ": "0", + "runs_skip": "0", + "runs_fail": "1", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-correct.annotations index c396e30d..8b60e0ad 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-correct.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct.annotations @@ -7,21 +7,9 @@ 'output': { 'title': '1 errors, 1 fail, 8 skipped, 18 pass in 0s', 
'summary': - '28 tests\u2002\u2003\u200318 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '11 suites\u2003\u2003\u205f\u20048 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20041 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\u2003\u20031 ' - '[:fire:](https://github.com/step-security/publish-unit-test-result-ac' - 'tion/blob/VERSION/README.md#the-symbols "test errors")\n\nResults for ' - 'commit commit s.\n\n' + '28 tests\u2002\u2003\u200318 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n11 suites\u2003\u2003\u20078 :zzz:\n\u20071 files\u2004\u2002\u2003\u2003\u2007' + '1 :x:\u2003\u20031 :fire:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIAxFr' '0KYHdTJeBlDUGKjiGlhMt7dIqi49b3fvEMaWCeSvWgqISmATxBpDKg8uI25ZuTFx63tHh' 'goaB2/C7PAzuYTRsGa60lMiA6zwbC9xXj/gkl8vZuL3M1lTTtrwTPkS9Cs5HkBSPFg+uI' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct.results.json b/python/test/files/nunit/nunit3/jenkins/NUnit-correct.results.json new file mode 100644 index 00000000..945ab7d4 --- /dev/null +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct.results.json @@ -0,0 +1,80 @@ +{ + "title": "1 errors, 1 fail, 8 skipped, 18 pass in 0s", + "summary": "28 tests   18 :white_check_mark:  0s :stopwatch:\n11 suites   8 :zzz:\n 1 files     1 :x:  1 :fire:\n\nResults for commit commit s.\n", + "conclusion": "failure", + 
"stats": { + "files": 1, + "errors": [], + "suites": 11, + "duration": 0, + "tests": 28, + "tests_succ": 18, + "tests_skip": 8, + "tests_fail": 1, + "tests_error": 1, + "runs": 28, + "runs_succ": 18, + "runs_skip": 8, + "runs_fail": 1, + "runs_error": 1, + "commit": "commit sha" + }, + "annotations": [ + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-correct.xml [took 0s]", + "title": "NUnit.Tests.Assemblies.MockTestFixture.FailingTest failed", + "raw_details": "Intentional failure\n\n at NUnit.Tests.Assemblies.MockTestFixture.FailingTest () [0x00000] in /home/charlie/Dev/NUnit/nunit-2.5/work/src/tests/mock-assembly/MockAssembly.cs:121" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "failure", + "message": "nunit3/jenkins/NUnit-correct.xml [took 0s]", + "title": "NUnit.Tests.Assemblies.MockTestFixture.TestWithException with error", + "raw_details": "System.ApplicationException : Intentional Exception\n\n at NUnit.Tests.Assemblies.MockTestFixture.TestWithException () [0x00000] in /home/charlie/Dev/NUnit/nunit-2.5/work/src/tests/mock-assembly/MockAssembly.cs:153" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 8 skipped tests, see \"Raw output\" for the full list of skipped tests.", + "title": "8 skipped tests found", + "raw_details": "NUnit.Tests.Assemblies.MockTestFixture.InconclusiveTest\nNUnit.Tests.Assemblies.MockTestFixture.MockTest4\nNUnit.Tests.Assemblies.MockTestFixture.MockTest5\nNUnit.Tests.Assemblies.MockTestFixture.NotRunnableTest\nNUnit.Tests.BadFixture.SomeTest\nNUnit.Tests.IgnoredFixture.Test1\nNUnit.Tests.IgnoredFixture.Test2\nNUnit.Tests.IgnoredFixture.Test3" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 28 tests, see \"Raw output\" for the full list of tests.", + "title": "28 tests found", + 
"raw_details": "NUnit.Tests.Assemblies.MockTestFixture.FailingTest\nNUnit.Tests.Assemblies.MockTestFixture.InconclusiveTest\nNUnit.Tests.Assemblies.MockTestFixture.MockTest1\nNUnit.Tests.Assemblies.MockTestFixture.MockTest2\nNUnit.Tests.Assemblies.MockTestFixture.MockTest3\nNUnit.Tests.Assemblies.MockTestFixture.MockTest4\nNUnit.Tests.Assemblies.MockTestFixture.MockTest5\nNUnit.Tests.Assemblies.MockTestFixture.NotRunnableTest\nNUnit.Tests.Assemblies.MockTestFixture.TestWithException\nNUnit.Tests.Assemblies.MockTestFixture.TestWithManyProperties\nNUnit.Tests.BadFixture.SomeTest\nNUnit.Tests.FixtureWithTestCases.GenericMethod(9.2d,11.7d)\nNUnit.Tests.FixtureWithTestCases.GenericMethod(2,4)\nNUnit.Tests.FixtureWithTestCases.MethodWithParameters(2,2)\nNUnit.Tests.FixtureWithTestCases.MethodWithParameters(9,11)\nNUnit.Tests.GenericFixture(11.5d).Test1\nNUnit.Tests.GenericFixture(11.5d).Test2\nNUnit.Tests.GenericFixture(5).Test1\nNUnit.Tests.GenericFixture(5).Test2\nNUnit.Tests.IgnoredFixture.Test1\nNUnit.Tests.IgnoredFixture.Test2\nNUnit.Tests.IgnoredFixture.Test3\nNUnit.Tests.ParameterizedFixture(42).Test1\nNUnit.Tests.ParameterizedFixture(42).Test2\nNUnit.Tests.ParameterizedFixture(5).Test1\nNUnit.Tests.ParameterizedFixture(5).Test2\nNUnit.Tests.Singletons.OneTestCase.TestCase\nNUnit.Tests.TestAssembly.MockTestFixture.MyTest" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "11", + "duration": "0", + "tests": "28", + "tests_succ": "18", + "tests_skip": "8", + "tests_fail": "1", + "tests_error": "1", + "runs": "28", + "runs_succ": "18", + "runs_skip": "8", + "runs_fail": "1", + "runs_error": "1", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.annotations index 4369109d..f8b19140 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.annotations +++ 
b/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.annotations @@ -7,29 +7,10 @@ 'output': { 'title': 'All 183 tests pass in 0s', 'summary': - '\u205f\u2004\u205f\u20041 files\u2004\u2003102 suites\u2004\u2003\u2002' - '0s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '183 tests\u2003183 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '0 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n218 runs\u2006\u2003' - '218 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '0 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '\u2007\u20071 files\u2004\u2003102 suites\u2004\u2003\u20020s ' + ':stopwatch:\n183 tests\u2003183 :white_check_mark:\u20030 :zzz:\u2003' + '0 :x:\n218 runs\u200a\u2003218 :white_check_mark:\u20030 :zzz:\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MSw6AIAwFr' '0JYu/CzMV7GEITY+MGUsjLe3YoY0V1nXjq7tDAbLztRFUL6AHRDWTMOARWBW1mUjDxRHN' 'vmod4Hrf9qgi3/6K2C+SMMosNkMKxXs67aBE8yN28xchaMnPe0WxYghnQJPyp5nNtosNP' diff --git 
a/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.results.json b/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.results.json new file mode 100644 index 00000000..347ca802 --- /dev/null +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.results.json @@ -0,0 +1,53 @@ +{ + "title": "All 183 tests pass in 0s", + "summary": "  1 files  102 suites   0s :stopwatch:\n183 tests 183 :white_check_mark: 0 :zzz: 0 :x:\n218 runs  218 :white_check_mark: 0 :zzz: 0 :x:\n\nResults for commit commit s.\n", + "conclusion": "success", + "stats": { + "files": 1, + "errors": [], + "suites": 102, + "duration": 0, + "tests": 183, + "tests_succ": 183, + "tests_skip": 0, + "tests_fail": 0, + "tests_error": 0, + "runs": 218, + "runs_succ": 218, + "runs_skip": 0, + "runs_fail": 0, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 183 tests, see \"Raw output\" for the full list of tests.", + "title": "183 tests found", + "raw_details": 
"imG.Approx.Tests.Components.BuildingBlocks.AmountTests+Clone.should_return_different_object\nimG.Approx.Tests.Components.BuildingBlocks.AmountTests+Clone.should_return_same_value\nimG.Approx.Tests.Components.BuildingBlocks.AmountTests+MutableComponents.should_return_empty\nimG.Approx.Tests.Components.BuildingBlocks.AmountTests+Nudge.should_change_value_in_range(100,-10,1)\nimG.Approx.Tests.Components.BuildingBlocks.AmountTests+Nudge.should_change_value_in_range(100,-100,1)\nimG.Approx.Tests.Components.BuildingBlocks.AmountTests+Nudge.should_change_value_in_range(100,-2,8)\nimG.Approx.Tests.Components.BuildingBlocks.AmountTests+Nudge.should_change_value_in_range(100,2,12)\nimG.Approx.Tests.Components.BuildingBlocks.AmountTests+Nudge.should_change_value_in_range(1000,1000,321)\nimG.Approx.Tests.Components.BuildingBlocks.AmountTests+RandomizeValues.should_randomize_value_in_range\nimG.Approx.Tests.Components.BuildingBlocks.AngleTests+Clone.should_return_different_object\nimG.Approx.Tests.Components.BuildingBlocks.AngleTests+Clone.should_return_same_value\nimG.Approx.Tests.Components.BuildingBlocks.AngleTests+MutableComponents.should_return_empty\nimG.Approx.Tests.Components.BuildingBlocks.AngleTests+Nudge.should_wrap_by_a_value_between_1_and_max_value_in_both_directions(10,355,True,5)\nimG.Approx.Tests.Components.BuildingBlocks.AngleTests+Nudge.should_wrap_by_a_value_between_1_and_max_value_in_both_directions(10,40,False,330)\nimG.Approx.Tests.Components.BuildingBlocks.AngleTests+Nudge.should_wrap_by_a_value_between_1_and_max_value_in_both_directions(10,40,True,50)\nimG.Approx.Tests.Components.BuildingBlocks.AngleTests+Nudge.should_wrap_by_a_value_between_1_and_max_value_in_both_directions(10,6,False,4)\nimG.Approx.Tests.Components.BuildingBlocks.AngleTests+RandomizeValue.should_select_a_value_between_0_and_360\nimG.Approx.Tests.Components.BuildingBlocks.ColorTests+Clone.should_clone_component\nimG.Approx.Tests.Components.BuildingBlocks.ColorTests+Clone.should_copy_va
lues\nimG.Approx.Tests.Components.BuildingBlocks.ColorTests+Constructor.should_initialize_data\nimG.Approx.Tests.Components.BuildingBlocks.ColorTests+ImplicitConversionToDrawingColor.should_convert_to_drawing_color\nimG.Approx.Tests.Components.BuildingBlocks.ColorTests+MutableComponents.should_not_contain_anything\nimG.Approx.Tests.Components.BuildingBlocks.ColorTests+RandomizeAlpha.should_randomize_alpha_in_range\nimG.Approx.Tests.Components.BuildingBlocks.ColorTests+RandomizeBlue.should_randomize_alpha_in_range\nimG.Approx.Tests.Components.BuildingBlocks.ColorTests+RandomizeGreen.should_randomize_alpha_in_range\nimG.Approx.Tests.Components.BuildingBlocks.ColorTests+RandomizeRed.should_randomize_alpha_in_range\nimG.Approx.Tests.Components.BuildingBlocks.ColorTests+RandomizeValues.should_randomize_colors_in_the_range\nimG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+Clone.should_return_different_object\nimG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+Clone.should_return_same_value\nimG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+MutableComponents.should_return_empty\nimG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+Nudge.should_change_value_in_range(100,-10,1)\nimG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+Nudge.should_change_value_in_range(100,-100,1)\nimG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+Nudge.should_change_value_in_range(100,-2,8)\nimG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+Nudge.should_change_value_in_range(100,10,16)\nimG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+Nudge.should_change_value_in_range(100,2,12)\nimG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+RandomizeValues.should_randomize_value_in_range\nimG.Approx.Tests.Components.BuildingBlocks.PositionTests+Clone.should_return_different_object\nimG.Approx.Tests.Components.BuildingBlocks.PositionTests+Clone.should_return_same_value\nimG.Approx.Tests.Components.BuildingBlocks.PositionTests+MutableComponents.should_return_e
mpty\nimG.Approx.Tests.Components.BuildingBlocks.PositionTests+Nudge.should_change_value_in_range(10,10,1,False,9,9)\nimG.Approx.Tests.Components.BuildingBlocks.PositionTests+Nudge.should_change_value_in_range(10,10,1,True,11,11)\nimG.Approx.Tests.Components.BuildingBlocks.PositionTests+Nudge.should_change_value_in_range(10,10,100,False,0,0)\nimG.Approx.Tests.Components.BuildingBlocks.PositionTests+Nudge.should_change_value_in_range(10,10,1000,True,321,654)\nimG.Approx.Tests.Components.BuildingBlocks.PositionTests+RandomizeValues.should_return_value_inside_target_limits\nimG.Approx.Tests.Components.DrawingTest+Clone.should_clone_all_shapes\nimG.Approx.Tests.Components.DrawingTest+Clone.should_clone_inner_components\nimG.Approx.Tests.Components.DrawingTest+Clone.should_copy_properties\nimG.Approx.Tests.Components.DrawingTest+Clone.should_create_clone_of_target\nimG.Approx.Tests.Components.DrawingTest+Constructor.should_keep_data\nimG.Approx.Tests.Components.DrawingTest+Draw.should_draw_all_shapes\nimG.Approx.Tests.Components.DrawingTest+Draw.should_fill_image_with_background_color\nimG.Approx.Tests.Components.DrawingTest+Draw.should_return_correct_size_image\nimG.Approx.Tests.Components.DrawingTest+MutableComponents.should_contain_all_shapes\nimG.Approx.Tests.Components.DrawingTest+MutableComponents.should_contain_color\nimG.Approx.Tests.Components.Shapes.AreaTests+Clone.should_return_different_object(System.Func`2[imG.Approx.Components.Shapes.Area,System.Object])\nimG.Approx.Tests.Components.Shapes.AreaTests+Draw.should_draw\nimG.Approx.Tests.Components.Shapes.AreaTests+InitializeComponents.should_randomize_elements\nimG.Approx.Tests.Components.Shapes.AreaTests+MutableComponents.should_return_components(imG.Approx.Components.Shapes.Area,imG.Approx.Components.BuildingBlocks.Angle)\nimG.Approx.Tests.Components.Shapes.AreaTests+MutableComponents.should_return_components(imG.Approx.Components.Shapes.Area,imG.Approx.Components.BuildingBlocks.Color)\nimG.Approx.Tests.Comp
onents.Shapes.AreaTests+MutableComponents.should_return_components(imG.Approx.Components.Shapes.Area,imG.Approx.Components.BuildingBlocks.Position)\nimG.Approx.Tests.Components.Shapes.BezierTests+Clone.should_return_different_object(System.Func`2[imG.Approx.Components.Shapes.Bezier,System.Object])\nimG.Approx.Tests.Components.Shapes.BezierTests+Draw.should_draw\nimG.Approx.Tests.Components.Shapes.BezierTests+InitializeComponents.should_randomize_elements\nimG.Approx.Tests.Components.Shapes.BezierTests+MutableComponents.should_return_components(imG.Approx.Components.Shapes.Bezier,imG.Approx.Components.BuildingBlocks.Color)\nimG.Approx.Tests.Components.Shapes.BezierTests+MutableComponents.should_return_components(imG.Approx.Components.Shapes.Bezier,imG.Approx.Components.BuildingBlocks.PenSize)\nimG.Approx.Tests.Components.Shapes.BezierTests+MutableComponents.should_return_components(imG.Approx.Components.Shapes.Bezier,imG.Approx.Components.BuildingBlocks.Position)\nimG.Approx.Tests.Components.Shapes.BlobTests+Clone.should_return_different_object(System.Func`2[imG.Approx.Components.Shapes.Blob,System.Object])\nimG.Approx.Tests.Components.Shapes.BlobTests+Draw.should_draw\nimG.Approx.Tests.Components.Shapes.BlobTests+InitializeComponents.should_randomize_elements\nimG.Approx.Tests.Components.Shapes.BlobTests+MutableComponents.should_return_components(imG.Approx.Components.Shapes.Blob,imG.Approx.Components.BuildingBlocks.Color)\nimG.Approx.Tests.Components.Shapes.BlobTests+MutableComponents.should_return_components(imG.Approx.Components.Shapes.Blob,imG.Approx.Components.BuildingBlocks.Position)\nimG.Approx.Tests.Components.Shapes.CircleTests+Clone.should_return_different_object(System.Func`2[imG.Approx.Components.Shapes.Circle,System.Object])\nimG.Approx.Tests.Components.Shapes.CircleTests+Draw.should_draw\nimG.Approx.Tests.Components.Shapes.CircleTests+InitializeComponents.should_randomize_elements\nimG.Approx.Tests.Components.Shapes.CircleTests+MutableComponents.should
_return_components(imG.Approx.Components.Shapes.Circle,imG.Approx.Components.BuildingBlocks.Amount)\nimG.Approx.Tests.Components.Shapes.CircleTests+MutableComponents.should_return_components(imG.Approx.Components.Shapes.Circle,imG.Approx.Components.BuildingBlocks.Color)\nimG.Approx.Tests.Components.Shapes.CircleTests+MutableComponents.should_return_components(imG.Approx.Components.Shapes.Circle,imG.Approx.Components.BuildingBlocks.Position)\nimG.Approx.Tests.Components.Shapes.Factories.ConcreteFactory.ShapeFactoryTests+GetShape.should_return_shape\nimG.Approx.Tests.Components.Shapes.Factories.ConcreteFactory.ShapeFactoryTests+Name.should_return_name_by_default\nimG.Approx.Tests.Components.Shapes.Factories.ShapeFactoryCatalogTests+ActiveFactories.should_return_only_active_factories\nimG.Approx.Tests.Components.Shapes.Factories.ShapeFactoryCatalogTests+Disable.should_enable_factories_named\nimG.Approx.Tests.Components.Shapes.Factories.ShapeFactoryCatalogTests+DisableAll.should_disable_all_factories\nimG.Approx.Tests.Components.Shapes.Factories.ShapeFactoryCatalogTests+Enable.should_enable_factories_named\nimG.Approx.Tests.Components.Shapes.Factories.ShapeFactoryCatalogTests+EnableAll.should_enable_all_factories\nimG.Approx.Tests.Components.Shapes.Factories.ShapeFactoryCatalogTests+Register.should_add_factory\nimG.Approx.Tests.Components.Shapes.Factories.ShapeFactoryCatalogTests+RegisterAllFactories.should_register_all_factories\nimG.Approx.Tests.Components.Shapes.LineTests+Clone.should_return_different_object(System.Func`2[imG.Approx.Components.Shapes.Line,System.Object])\nimG.Approx.Tests.Components.Shapes.LineTests+Draw.should_draw\nimG.Approx.Tests.Components.Shapes.LineTests+InitializeComponents.should_randomize_elements\nimG.Approx.Tests.Components.Shapes.LineTests+MutableComponents.should_return_components(imG.Approx.Components.Shapes.Line,imG.Approx.Components.BuildingBlocks.Color)\nimG.Approx.Tests.Components.Shapes.LineTests+MutableComponents.should_return_co
mponents(imG.Approx.Components.Shapes.Line,imG.Approx.Components.BuildingBlocks.PenSize)\nimG.Approx.Tests.Components.Shapes.LineTests+MutableComponents.should_return_components(imG.Approx.Components.Shapes.Line,imG.Approx.Components.BuildingBlocks.Position)\nimG.Approx.Tests.Components.Shapes.PolygonTests+Clone.should_return_different_object(System.Func`2[imG.Approx.Components.Shapes.Polygon,System.Object])\nimG.Approx.Tests.Components.Shapes.PolygonTests+Draw.should_draw\nimG.Approx.Tests.Components.Shapes.PolygonTests+InitializeComponents.should_randomize_elements\nimG.Approx.Tests.Components.Shapes.PolygonTests+MutableComponents.should_return_components(imG.Approx.Components.Shapes.Polygon,imG.Approx.Components.BuildingBlocks.Color)\nimG.Approx.Tests.Components.Shapes.PolygonTests+MutableComponents.should_return_components(imG.Approx.Components.Shapes.Polygon,imG.Approx.Components.BuildingBlocks.Position)\nimG.Approx.Tests.Components.Shapes.RectangleTests+Clone.should_return_different_object(System.Func`2[imG.Approx.Components.Shapes.Rectangle,System.Object])\nimG.Approx.Tests.Components.Shapes.RectangleTests+Draw.should_draw\nimG.Approx.Tests.Components.Shapes.RectangleTests+InitializeComponents.should_randomize_elements\nimG.Approx.Tests.Components.Shapes.RectangleTests+MutableComponents.should_return_components(imG.Approx.Components.Shapes.Rectangle,imG.Approx.Components.BuildingBlocks.Amount)\nimG.Approx.Tests.Components.Shapes.RectangleTests+MutableComponents.should_return_components(imG.Approx.Components.Shapes.Rectangle,imG.Approx.Components.BuildingBlocks.Color)\nimG.Approx.Tests.Components.Shapes.RectangleTests+MutableComponents.should_return_components(imG.Approx.Components.Shapes.Rectangle,imG.Approx.Components.BuildingBlocks.Position)\nimG.Approx.Tests.Components.Shapes.TriangleTests+Clone.should_return_different_object(System.Func`2[imG.Approx.Components.Shapes.Triangle,System.Object])\nimG.Approx.Tests.Components.Shapes.TriangleTests+Draw.should_dr
aw\nimG.Approx.Tests.Components.Shapes.TriangleTests+InitializeComponents.should_randomize_elements\nimG.Approx.Tests.Components.Shapes.TriangleTests+MutableComponents.should_return_components(imG.Approx.Components.Shapes.Triangle,imG.Approx.Components.BuildingBlocks.Color)\nimG.Approx.Tests.Components.Shapes.TriangleTests+MutableComponents.should_return_components(imG.Approx.Components.Shapes.Triangle,imG.Approx.Components.BuildingBlocks.Position)\nimG.Approx.Tests.Mutation.MutagenTests+ChooseMutation.should_return_mutation_description_determined_by_random_provider(System.Collections.Generic.Dictionary`2[imG.Approx.Mutation.IMutationDescription,System.Collections.Generic.List`1[imG.Approx.Mutation.IMutable]],Castle.Proxies.IMutationDescriptionProxy,Castle.Proxies.IMutableProxy,0,0)\nimG.Approx.Tests.Mutation.MutagenTests+ChooseMutation.should_return_mutation_description_determined_by_random_provider(System.Collections.Generic.Dictionary`2[imG.Approx.Mutation.IMutationDescription,System.Collections.Generic.List`1[imG.Approx.Mutation.IMutable]],Castle.Proxies.IMutationDescriptionProxy,Castle.Proxies.IMutableProxy,0,1)\nimG.Approx.Tests.Mutation.MutagenTests+ChooseMutation.should_return_mutation_description_determined_by_random_provider(System.Collections.Generic.Dictionary`2[imG.Approx.Mutation.IMutationDescription,System.Collections.Generic.List`1[imG.Approx.Mutation.IMutable]],Castle.Proxies.IMutationDescriptionProxy,Castle.Proxies.IMutableProxy,6,0)\nimG.Approx.Tests.Mutation.MutagenTests+ChooseMutation.should_return_mutation_description_determined_by_random_provider(System.Collections.Generic.Dictionary`2[imG.Approx.Mutation.IMutationDescription,System.Collections.Generic.List`1[imG.Approx.Mutation.IMutable]],Castle.Proxies.IMutationDescriptionProxy,Castle.Proxies.IMutableProxy,6,1)\nimG.Approx.Tests.Mutation.MutagenTests+ChooseMutation.should_return_mutation_description_determined_by_random_provider(System.Collections.Generic.Dictionary`2[imG.Approx.Mutation.IMu
tationDescription,System.Collections.Generic.List`1[imG.Approx.Mutation.IMutable]],Castle.Proxies.IMutationDescriptionProxy,Castle.Proxies.IMutableProxy,7,0)\nimG.Approx.Tests.Mutation.MutagenTests+ChooseMutation.should_return_mutation_description_determined_by_random_provider(System.Collections.Generic.Dictionary`2[imG.Approx.Mutation.IMutationDescription,System.Collections.Generic.List`1[imG.Approx.Mutation.IMutable]],Castle.Proxies.IMutationDescriptionProxy,Castle.Proxies.IMutableProxy,7,1)\nimG.Approx.Tests.Mutation.MutagenTests+ChooseMutation.should_return_null_if_no_mutation_exists\nimG.Approx.Tests.Mutation.MutagenTests+GetMutationsFor.should_return_active_and_applicable_and_selectable_mutations\nimG.Approx.Tests.Mutation.MutagenTests+GetMutationsFor.should_return_empty_if_mutable_is_unknown\nimG.Approx.Tests.Mutation.MutagenTests+GetMutationsFor.should_return_mutations_recursively\nimG.Approx.Tests.Mutation.MutagenTests+NoOpDescription.should_always_have_occasions_to_mutate\nimG.Approx.Tests.Mutation.MutagenTests+NoOpDescription.should_always_mutate_without_doing_anything_to_the_target\nimG.Approx.Tests.Mutation.MutagenTests+NoOpDescription.should_always_target_IMutableType\nimG.Approx.Tests.Mutation.MutagenTests+NoOpDescription.should_be_always_able_to_mutate\nimG.Approx.Tests.Mutation.MutagenTests+NoOpDescription.should_be_always_active\nimG.Approx.Tests.Mutation.MutagenTests+SelectMutation.should_return_a_mutation\nimG.Approx.Tests.Mutation.MutagenTests+SelectMutation.should_return_matching_selected_mutation\nimG.Approx.Tests.Mutation.MutagenTests+SelectMutation.should_return_the_default_mutation_if_no_mutation_exists\nimG.Approx.Tests.Mutation.MutagenTests+SelectMutation.should_throw_if_any_component_is_null(imG.Approx.Mutation.Process,null)\nimG.Approx.Tests.Mutation.MutagenTests+SelectMutation.should_throw_if_any_component_is_null(null,Castle.Proxies.IMutableProxy)\nimG.Approx.Tests.Mutation.MutationDescriptionCatalogTest+DeclareMutation.should_add_des
cription_to_catalog\nimG.Approx.Tests.Mutation.MutationDescriptionCatalogTest+DeclareMutation.should_throw_when_the_same_description_is_declared_twice\nimG.Approx.Tests.Mutation.MutationDescriptionCatalogTest+For.should_return_empty_list_for_unknown_mutable_type\nimG.Approx.Tests.Mutation.MutationDescriptionCatalogTest+For.should_return_list_of_descriptions_for_type(imG.Approx.Tests.Mutation.MutableAndDescription.Mutable1,imG.Approx.Mutation.MutationDescription`1[imG.Approx.Tests.Mutation.MutableAndDescription.Mutable1],imG.Approx.Mutation.MutationDescription`1[imG.Approx.Tests.Mutation.MutableAndDescription.Mutable2])\nimG.Approx.Tests.Mutation.MutationDescriptionCatalogTest+For.should_return_list_of_descriptions_for_type(imG.Approx.Tests.Mutation.MutableAndDescription.Mutable2,imG.Approx.Mutation.MutationDescription`1[imG.Approx.Tests.Mutation.MutableAndDescription.Mutable2],imG.Approx.Mutation.MutationDescription`1[imG.Approx.Tests.Mutation.MutableAndDescription.Mutable3])\nimG.Approx.Tests.Mutation.MutationDescriptionCatalogTest+For.should_return_list_of_descriptions_for_type(imG.Approx.Tests.Mutation.MutableAndDescription.Mutable3,imG.Approx.Mutation.MutationDescription`1[imG.Approx.Tests.Mutation.MutableAndDescription.Mutable3],imG.Approx.Mutation.MutationDescription`1[imG.Approx.Tests.Mutation.MutableAndDescription.Mutable1])\nimG.Approx.Tests.Mutation.MutationDescriptionCatalogTest+RegisterAllMutations.should_register_all_mutations_declared_by_registrars\nimG.Approx.Tests.Mutation.MutationDescriptionTests+CanMutate.lambda_is_called_when_checking\nimG.Approx.Tests.Mutation.MutationDescriptionTests+Constructor.should_refuse_odds_that_are_not_positive(-1)\nimG.Approx.Tests.Mutation.MutationDescriptionTests+Constructor.should_refuse_odds_that_are_not_positive(-1000)\nimG.Approx.Tests.Mutation.MutationDescriptionTests+Constructor.should_refuse_odds_that_are_not_positive(0)\nimG.Approx.Tests.Mutation.MutationDescriptionTests+GetMutationTargetType.should_return_typ
e_of_generic\nimG.Approx.Tests.Mutation.MutationDescriptionTests+Mutate.lambda_is_called_when_mutating\nimG.Approx.Tests.Mutation.ProcessTests+Constructor.should_throw_if_any_argument_is_null(Castle.Proxies.IRandomizationProviderProxy,Castle.Proxies.IMutationDescriptionCatalogProxy,Castle.Proxies.ITargetProxy,null)\nimG.Approx.Tests.Mutation.ProcessTests+Constructor.should_throw_if_any_argument_is_null(Castle.Proxies.IRandomizationProviderProxy,Castle.Proxies.IMutationDescriptionCatalogProxy,null,Castle.Proxies.IShapeFactoryCatalogProxy)\nimG.Approx.Tests.Mutation.ProcessTests+Constructor.should_throw_if_any_argument_is_null(Castle.Proxies.IRandomizationProviderProxy,null,Castle.Proxies.ITargetProxy,Castle.Proxies.IShapeFactoryCatalogProxy)\nimG.Approx.Tests.Mutation.ProcessTests+Constructor.should_throw_if_any_argument_is_null(null,Castle.Proxies.IMutationDescriptionCatalogProxy,Castle.Proxies.ITargetProxy,Castle.Proxies.IShapeFactoryCatalogProxy)\nimG.Approx.Tests.Mutation.ProcessTests+Mutate.should_always_keep_best_drawing_according_to_distance(False)\nimG.Approx.Tests.Mutation.ProcessTests+Mutate.should_always_keep_best_drawing_according_to_distance(True)\nimG.Approx.Tests.Mutation.ProcessTests+Mutate.should_increase_evolutions_when_drawing_is_better\nimG.Approx.Tests.Mutation.ProcessTests+Mutate.should_increase_generation_number\nimG.Approx.Tests.Mutation.ProcessTests+Mutate.should_trigger_event_when_drawing_is_better\nimG.Approx.Tests.Mutation.ProcessTests+Mutate.should_trigger_event_when_drawing_is_worse\nimG.Approx.Tests.Mutation.ProcessTests+SetupDrawing.should_compute_the_distance_only_the_first_time\nimG.Approx.Tests.Mutation.ProcessTests+SetupDrawing.should_create_drawing_based_on_target\nimG.Approx.Tests.Mutation.ProcessTests+SetupDrawing.should_create_the_drawing_only_the_first_time\nimG.Approx.Tests.Mutation.RandomizationProviderTests+Constructor.should_keep_the_seed\nimG.Approx.Tests.Mutation.RandomizationProviderTests+Next.should_return_integer\nimG
.Approx.Tests.Mutation.TargetTests+Constructor.should_keep_initialized_data(System.Func`2[imG.Approx.Mutation.Target,System.Object],\"data\\\\red.png\")\nimG.Approx.Tests.Mutation.TargetTests+Constructor.should_keep_initialized_data(System.Func`2[imG.Approx.Mutation.Target,System.Object],25)\nimG.Approx.Tests.Mutation.TargetTests+DistanceTo.should_not_throw_if_dimensions_are_identical\nimG.Approx.Tests.Mutation.TargetTests+DistanceTo.should_throw_if_dimensions_are_different(imG.Approx.Components.Drawing)\nimG.Approx.Tests.Mutation.TargetTests+LoadImageData.should_load_dimensions_from_image\nimG.Approx.Tests.Mutation.TargetTests+LoadImageData.should_load_image_data\nimG.Approx.Tests.Mutation.TargetTests+LoadImageData.should_not_resize_if_image_dimensions_are_over_or_equal_to_maxDimension(100)\nimG.Approx.Tests.Mutation.TargetTests+LoadImageData.should_not_resize_if_image_dimensions_are_over_or_equal_to_maxDimension(50)\nimG.Approx.Tests.Mutation.TargetTests+LoadImageData.should_resize_if_image_dimensions_are_over_maxDimension\nimG.Approx.Tests.Mutation.TargetTests+LoadImageData.should_set_ratio_to_correct_value_when_loading(10,0.2f)\nimG.Approx.Tests.Mutation.TargetTests+LoadImageData.should_set_ratio_to_correct_value_when_loading(25,0.5f)\nimG.Approx.Tests.Mutation.TargetTests+LoadImageData.should_set_ratio_to_correct_value_when_loading(50,1.0f)\nimG.Approx.Tests.Mutation.TargetTests+LoadImageData.should_set_ratio_to_correct_value_when_loading(99,1.0f)\nimG.Approx.Tests.Mutation.TargetTests+Name.should_return_filename\nimG.Approx.Tests.Tools.TestValues+Clamp.should_return_max_value_between_original_and_min_value(1,0,1)\nimG.Approx.Tests.Tools.TestValues+Clamp.should_return_max_value_between_original_and_min_value(1,1,1)\nimG.Approx.Tests.Tools.TestValues+Clamp.should_return_max_value_between_original_and_min_value(1,10,10)\nimG.Approx.Tests.Tools.TestValues+Clamp.should_return_min_value_between_original_and_max_value(1,0,0)\nimG.Approx.Tests.Tools.TestValues+Clamp.s
hould_return_min_value_between_original_and_max_value(1,1,1)\nimG.Approx.Tests.Tools.TestValues+Clamp.should_return_min_value_between_original_and_max_value(1,10,1)\nimG.Approx.Tests.Tools.TestValues+Clamp.should_throw_if_min_is_above_max\nimG.Approx.Tests.Tools.TestValues+Wrap.should_throw_if_min_is_above_max\nimG.Approx.Tests.Tools.TestValues+Wrap.should_wrap_back_to_range(-101,10,20,19)\nimG.Approx.Tests.Tools.TestValues+Wrap.should_wrap_back_to_range(10,10,25,10)\nimG.Approx.Tests.Tools.TestValues+Wrap.should_wrap_back_to_range(101,10,20,11)\nimG.Approx.Tests.Tools.TestValues+Wrap.should_wrap_back_to_range(16,10,25,16)\nimG.Approx.Tests.Tools.TestValues+Wrap.should_wrap_back_to_range(25,10,25,10)" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "102", + "duration": "0", + "tests": "183", + "tests_succ": "183", + "tests_skip": "0", + "tests_fail": "0", + "tests_error": "0", + "runs": "218", + "runs_succ": "218", + "runs_skip": "0", + "runs_fail": "0", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.annotations index 79848140..f678db2a 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 22 tests pass in 4m 24s', 'summary': - '22 tests\u2002\u2003\u200322 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '4m 24s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '13 suites\u2003\u2003\u205f\u20040 ' - 
'[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u2003\u205f\u20040 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '22 tests\u2002\u2003\u200322 :white_check_mark:\u2003\u20034m 24s ' + ':stopwatch:\n13 suites\u2003\u2003\u20070 :zzz:\n\u20071 files\u2004\u2002\u2003\u2003\u2007' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MOw6AIBAFr' '0KoLRSNhZcxBDVuFDELVMa7uwh+uzfzktn4AHNvecOKjHHrwUUoiTqP0oFZiEVdkaDPhV' 'eIC1rrlfqZCVYy+S0GCfNH9IgGk0G/3MWwP8Eont7Jr9zJ75oyWoMjSIvZUfL9APCIHb/' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.results.json b/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.results.json new file mode 100644 index 00000000..85ee13b7 --- /dev/null +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.results.json @@ -0,0 +1,53 @@ +{ + "title": "All 22 tests pass in 4m 24s", + "summary": "22 tests   22 :white_check_mark:  4m 24s :stopwatch:\n13 suites   0 :zzz:\n 1 files     0 :x:\n\nResults for commit commit s.\n", + "conclusion": "success", + "stats": { + "files": 1, + "errors": [], + "suites": 13, + "duration": 264, + "tests": 22, + "tests_succ": 22, + "tests_skip": 0, + "tests_fail": 0, + "tests_error": 0, + "runs": 22, + "runs_succ": 22, + "runs_skip": 0, + "runs_fail": 0, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 22 tests, see \"Raw output\" for the full list of tests.", + "title": "22 tests found", + "raw_details": 
"BlogEngine.Tests.Account.Login.InvalidLoginShouldFail\nBlogEngine.Tests.Account.Login.ValidLoginShouldPass\nBlogEngine.Tests.Comments.Comment.CanAddUpdateAndDeleteComment\nBlogEngine.Tests.FileSystem.Crud.CanWriteAndReadAppCodeDirectory\nBlogEngine.Tests.FileSystem.Crud.CanWriteAndReadAppDataDirectory\nBlogEngine.Tests.Navigation.CustomPages.CanNavigateToCustomAspxPage\nBlogEngine.Tests.Navigation.CustomPages.CanNavigateToDefaultAspxPageInSubDiretory\nBlogEngine.Tests.Navigation.SubBlog.MyTest\nBlogEngine.Tests.Navigation.SubBlogAggregation.MyTest\nBlogEngine.Tests.Packaging.Installer.CanInstallAndUninstallTheme\nBlogEngine.Tests.Posts.Post.CanCreateAndDeletePost\nBlogEngine.Tests.QuickNotes.Crud.ShouldBeAbleToCreateUpdateAndDeleteNote\nBlogEngine.Tests.QuickNotes.Navigation.AdminShouldSeeQuickNotesPanel\nBlogEngine.Tests.QuickNotes.Navigation.AnonymousUserShouldNotSeeQuickNotesPanel\nBlogEngine.Tests.QuickNotes.Navigation.ShouldBeAbleBrowseThroughTabs\nBlogEngine.Tests.QuickNotes.Posting.PublishQuickNoteAsPost\nBlogEngine.Tests.Quixote.Runner.RunAvatarTests\nBlogEngine.Tests.Quixote.Runner.RunPackagingTests\nBlogEngine.Tests.Quixote.Runner.RunPagerTests\nBlogEngine.Tests.Quixote.Runner.RunUrlRewriteNoExtensionsTests\nBlogEngine.Tests.Quixote.Runner.RunUrlRewriteTests\nBlogEngine.Tests.Users.AuthorProfile.CanAddUpdateAndDeleteUserProfile" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "13", + "duration": "264", + "tests": "22", + "tests_succ": "22", + "tests_skip": "0", + "tests_fail": "0", + "tests_error": "0", + "runs": "22", + "runs_succ": "22", + "runs_skip": "0", + "runs_fail": "0", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-failure.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-failure.annotations index ab57926e..ca0c972e 100644 --- 
a/python/test/files/nunit/nunit3/jenkins/NUnit-failure.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-failure.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 2 pass in 0s', 'summary': - '3 tests\u2002\u2003\u20032 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '1 suites\u2003\u20030 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '3 tests\u2002\u2003\u20032 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBBEr' '0KoLfx0XoYQ1LhRwCxQGe/uykfo5s1M3s03OFfHZzZ0jLsA/ocloPRgDWFPSIP/pqlk4Y' 'JSVIy1OOBq32KTcGZbKlZEi/mCwRTfF1td4mqL3Mgity5ltQZPkBNzu+TPC/n9SCLdAAA' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-failure.results.json b/python/test/files/nunit/nunit3/jenkins/NUnit-failure.results.json new file mode 100644 index 00000000..c2293433 --- /dev/null +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-failure.results.json @@ -0,0 +1,62 @@ +{ + "title": "1 fail, 2 pass in 0s", + "summary": "3 tests   2 :white_check_mark:  0s :stopwatch:\n1 suites  0 :zzz:\n1 files    1 :x:\n\nResults for commit commit s.\n", + "conclusion": "failure", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 0, + "tests": 3, 
+ "tests_succ": 2, + "tests_skip": 0, + "tests_fail": 1, + "tests_error": 0, + "runs": 3, + "runs_succ": 2, + "runs_skip": 0, + "runs_fail": 1, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-failure.xml [took 0s]", + "title": "UnitTests.MainClassTest.TestFailure failed", + "raw_details": " Expected failure\n Expected: 30\n But was: 20\n at UnitTests.MainClassTest.TestFailure () [0x00000] \n at <0x00000> \n at (wrapper managed-to-native) System.Reflection.MonoMethod:InternalInvoke (object,object[])\n at System.Reflection.MonoMethod.Invoke (System.Object obj, BindingFlags invokeAttr, System.Reflection.Binder binder, System.Object[] parameters, System.Globalization.CultureInfo culture) [0x00000]" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 3 tests, see \"Raw output\" for the full list of tests.", + "title": "3 tests found", + "raw_details": "UnitTests.MainClassTest.TestFailure\nUnitTests.MainClassTest.TestMethodUpdateValue\nUnitTests.MainClassTest.TestPropertyValue" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "0", + "tests": "3", + "tests_succ": "2", + "tests_skip": "0", + "tests_fail": "1", + "tests_error": "0", + "runs": "3", + "runs_succ": "2", + "runs_skip": "0", + "runs_fail": "1", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.annotations index e1351673..0a746420 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.annotations @@ -7,19 +7,9 @@ 'output': { 'title': '1 fail, 9 pass in 1s', 
'summary': - '10 tests\u2002\u2003\u20039 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '1s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '\u205f\u20041 suites\u2003\u20030 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004' - '1 files\u2004\u2002\u2003\u20031 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '10 tests\u2002\u2003\u20039 :white_check_mark:\u2003\u20031s ' + ':stopwatch:\n\u20071 suites\u2003\u20030 :zzz:\n\u20071 files\u2004\u2002\u2003\u2003' + '1 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MQQ6AIAwEv' '0I4e9CjfoYQhNgoYAqcjH8XEaG3zu52Lm7g0IEvbBoYDwligzWhjOBdxVzEUo0/iJCUys' 'ncgx3OHPSFkXDQf6ERPdYJJteE7019H3ddYWIrTGXKWwsxQ71Y2CS/HxbYkAffAAAA\n', diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.results.json b/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.results.json new file mode 100644 index 00000000..f3f041a5 --- /dev/null +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.results.json @@ -0,0 +1,62 @@ +{ + "title": "1 fail, 9 pass in 1s", + "summary": "10 tests   9 :white_check_mark:  1s :stopwatch:\n 1 suites  0 :zzz:\n 1 files    1 :x:\n\nResults for commit commit s.\n", + "conclusion": "failure", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 1, + "tests": 10, + "tests_succ": 9, + "tests_skip": 0, + "tests_fail": 1, + "tests_error": 0, + "runs": 10, + "runs_succ": 9, + "runs_skip": 0, + "runs_fail": 
1, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-healthReport.xml [took 0s]", + "title": "UnitTests.MainClassTest.TestFailure failed", + "raw_details": " Expected failure\n Expected: 30\n But was: 20\n at UnitTests.MainClassTest.TestFailure () [0x00000]\n at <0x00000> \n at (wrapper managed-to-native) System.Reflection.MonoMethod:InternalInvoke (object,object[])\n at System.Reflection.MonoMethod.Invoke (System.Object obj, BindingFlags invokeAttr, System.Reflection.Binder binder, System.Object[] parameters, System.Globalization.CultureInfo culture) [0x00000]" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 10 tests, see \"Raw output\" for the full list of tests.", + "title": "10 tests found", + "raw_details": "UnitTests.MainClassTest.TestFailure\nUnitTests.MainClassTest.TestMethodUpdateValue\nUnitTests.MainClassTest.TestPropertyValue\nUnitTests.MainClassTest.TestPropertyValue1\nUnitTests.MainClassTest.TestPropertyValue2\nUnitTests.MainClassTest.TestPropertyValue3\nUnitTests.MainClassTest.TestPropertyValue4\nUnitTests.MainClassTest.TestPropertyValue5\nUnitTests.MainClassTest.TestPropertyValue6\nUnitTests.MainClassTest.TestPropertyValue7" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "1", + "tests": "10", + "tests_succ": "9", + "tests_skip": "0", + "tests_fail": "1", + "tests_error": "0", + "runs": "10", + "runs_succ": "9", + "runs_skip": "0", + "runs_fail": "1", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.annotations index 1ae3d028..e9665178 100644 --- 
a/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 1 tests pass, 2 skipped in 0s', 'summary': - '3 tests\u2002\u2003\u20031 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '0s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '1 suites\u2003\u20032 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '3 tests\u2002\u2003\u20031 :white_check_mark:\u2003\u20030s ' + ':stopwatch:\n1 suites\u2003\u20032 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MSw6AIAxEr' '0JYu/Cz8zKGIMRGBdPCynh3KyKymzfTvlNa2AzJUXSNkBQhFJgjqgDeMbaMPIRnGr48Ud' 'Q63+ZihYOLvhRWwVa/TwbRY24wus/3xFr38m9LXMkS1y7t9x0CQ06CFiWvGx5uWF7dAAA' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.results.json b/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.results.json new file mode 100644 index 00000000..7de6de31 --- /dev/null +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.results.json @@ -0,0 +1,62 @@ +{ + "title": "All 1 tests pass, 2 skipped in 0s", + "summary": "3 tests   1 :white_check_mark:  0s :stopwatch:\n1 suites  2 :zzz:\n1 files    0 :x:\n\nResults for commit commit s.\n", + "conclusion": "success", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + 
"duration": 0, + "tests": 3, + "tests_succ": 1, + "tests_skip": 2, + "tests_fail": 0, + "tests_error": 0, + "runs": 3, + "runs_succ": 1, + "runs_skip": 2, + "runs_fail": 0, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 2 skipped tests, see \"Raw output\" for the full list of skipped tests.", + "title": "2 skipped tests found", + "raw_details": "UnitTests.OtherMainClassTest.TestIgnored\nUnitTests.OtherMainClassTest.TestIgnoredWithText" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 3 tests, see \"Raw output\" for the full list of tests.", + "title": "3 tests found", + "raw_details": "UnitTests.OtherMainClassTest.TestIgnored\nUnitTests.OtherMainClassTest.TestIgnoredWithText\nUnitTests.OtherMainClassTest.TestPropertyValue" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "0", + "tests": "3", + "tests_succ": "1", + "tests_skip": "2", + "tests_fail": "0", + "tests_error": "0", + "runs": "3", + "runs_succ": "1", + "runs_skip": "2", + "runs_fail": "0", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.annotations index 8859c1d1..dd42640d 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.annotations @@ -7,19 +7,9 @@ 'output': { 'title': 'All 6 tests pass in 35s', 'summary': - '6 tests\u2002\u2003\u20036 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003' - '35s ' - 
'[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '1 suites\u2003\u20030 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 ' - 'files\u2004\u2002\u2003\u20030 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '6 tests\u2002\u2003\u20036 :white_check_mark:\u2003\u200335s ' + ':stopwatch:\n1 suites\u2003\u20030 :zzz:\n1 files\u2004\u2002\u2003\u2003' + '0 :x:\n\nResults for commit commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMwQqAIBAFf' '0U8dyiiDv1MiBktpcaqp+jf20pLb2/mwRx8hk05PrCmYtwF8B9MAYUHawjbjpgef3992q' 'MLUpZihZ1E/YlZwFYIhWgxGgwm9e6Z517+aw9nsYfzlrRagyeIi7lF8PMC7eTeEN4AAAA' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.results.json b/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.results.json new file mode 100644 index 00000000..2977260b --- /dev/null +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.results.json @@ -0,0 +1,53 @@ +{ + "title": "All 6 tests pass in 35s", + "summary": "6 tests   6 :white_check_mark:  35s :stopwatch:\n1 suites  0 :zzz:\n1 files    0 :x:\n\nResults for commit commit s.\n", + "conclusion": "success", + "stats": { + "files": 1, + "errors": [], + "suites": 1, + "duration": 35, + "tests": 6, + "tests_succ": 6, + "tests_skip": 0, + "tests_fail": 0, + "tests_error": 0, + "runs": 6, + "runs_succ": 6, + "runs_skip": 0, + "runs_fail": 0, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There are 6 tests, see \"Raw output\" for the full list of tests.", + "title": "6 tests found", 
+ "raw_details": "testChangePassword\ntestChangePasswordFailEmptyForm\ntestChangePasswordFailNewPasswordKO\ntestChangePasswordFailNewPasswordNotRepeated\ntestChangePasswordFailNewPasswordTooShort\ntestChangePasswordFailOldPasswordKO" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "1", + "duration": "35", + "tests": "6", + "tests_succ": "6", + "tests_skip": "0", + "tests_fail": "0", + "tests_error": "0", + "runs": "6", + "runs_succ": "6", + "runs_skip": "0", + "runs_fail": "0", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.annotations index 7a7faddb..4093bcc2 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.annotations @@ -7,28 +7,10 @@ 'output': { 'title': 'All 1 tests pass, 1 skipped in 6s', 'summary': - '1 files\u2004\u20032 suites\u2004\u2003\u20026s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '2 tests\u20031 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '1 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '0 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n2 runs\u2006\u2003' - '2 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - 
'[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '0 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '1 files\u2004\u20032 suites\u2004\u2003\u20026s :stopwatch:\n2 tests\u2003' + '1 :white_check_mark:\u20031 :zzz:\u20030 :x:\n2 runs\u200a\u20032 ' + ':white_check_mark:\u20030 :zzz:\u20030 :x:\n\nResults for commit ' + 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIBAEv' '0KoLdTCws8YghAvIpgDKuPfPREUu505mINrMMrzkXUN4z5CSNATzBFFAGcJB0I6hHJKe/' 'JRyvwxixX2n9ACDIn2FQrRYTYYbends+Q+fmrlaR1LXLek2zYIBHkxvwh+XlEX1VPdAAA' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.results.json b/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.results.json new file mode 100644 index 00000000..2da38697 --- /dev/null +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.results.json @@ -0,0 +1,62 @@ +{ + "title": "All 1 tests pass, 1 skipped in 6s", + "summary": "1 files  2 suites   6s :stopwatch:\n2 tests 1 :white_check_mark: 1 :zzz: 0 :x:\n2 runs  2 :white_check_mark: 0 :zzz: 0 :x:\n\nResults for commit commit s.\n", + "conclusion": "success", + "stats": { + "files": 1, + "errors": [], + "suites": 2, + "duration": 6, + "tests": 2, + "tests_succ": 1, + "tests_skip": 1, + "tests_fail": 0, + "tests_error": 0, + "runs": 2, + "runs_succ": 2, + "runs_skip": 0, + "runs_fail": 0, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": ".github", + "start_line": 0, + "end_line": 0, + "annotation_level": "notice", + "message": "There is 1 skipped test, see \"Raw output\" for the name of the skipped test.", + "title": "1 skipped test found", + "raw_details": "AddTwoNumbers" + }, + { + "path": ".github", + "start_line": 0, + "end_line": 0, + 
"annotation_level": "notice", + "message": "There are 2 tests, see \"Raw output\" for the full list of tests.", + "title": "2 tests found", + "raw_details": "AddTwoNumbers\nSubSmokeTest(\"Geo\",\"Geonw\",\"dev1234567\",System.String[])" + } + ], + "check_url": "html", + "formatted": { + "stats": { + "files": "1", + "errors": [], + "suites": "2", + "duration": "6", + "tests": "2", + "tests_succ": "1", + "tests_skip": "1", + "tests_fail": "0", + "tests_error": "0", + "runs": "2", + "runs_succ": "2", + "runs_skip": "0", + "runs_fail": "0", + "runs_error": "0", + "commit": "commit sha" + } + } +} \ No newline at end of file diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue44527.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue44527.annotations index ec2363d8..bacfefec 100644 --- a/python/test/files/nunit/nunit3/jenkins/NUnit-issue44527.annotations +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue44527.annotations @@ -7,29 +7,11 @@ 'output': { 'title': '140 fail, 6 pass in 14m 11s', 'summary': - '\u205f\u2004\u205f\u20041 files\u2004\u2003155 suites\u2004\u2003\u2002' - '14m 11s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '146 tests\u2003\u205f\u2004\u205f\u20046 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '140 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n150 runs\u2006\u2003\u205f\u2004\u205f\u2004' - '6 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 
'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '144 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '\u2007\u20071 files\u2004\u2003155 suites\u2004\u2003\u200214m 11s ' + ':stopwatch:\n146 tests\u2003\u2007\u20076 :white_check_mark:\u20030 ' + ':zzz:\u2003140 :x:\n150 runs\u200a\u2003\u2007\u20076 ' + ':white_check_mark:\u20030 :zzz:\u2003144 :x:\n\nResults for commit ' + 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02NSw6AIAwFr' '0JYu9BEjPEyhqDExg+mwMp4dysfZdeZ175eXMM2Wz6wpmLcenARhCCcPEoH5iDRizen0I' 'W47TKN1itFqhArnCTqT2gJWzj61YxoMC2hP+LLDGVl5L8x8FfYZlP2KbPv4AjSxOwi+f0' @@ -1934,29 +1916,11 @@ 'output': { 'title': '140 fail, 6 pass in 14m 11s', 'summary': - '\u205f\u2004\u205f\u20041 files\u2004\u2003155 suites\u2004\u2003\u2002' - '14m 11s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '146 tests\u2003\u205f\u2004\u205f\u20046 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '140 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n150 runs\u2006\u2003\u205f\u2004\u205f\u2004' - '6 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 
'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '144 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '\u2007\u20071 files\u2004\u2003155 suites\u2004\u2003\u200214m 11s ' + ':stopwatch:\n146 tests\u2003\u2007\u20076 :white_check_mark:\u20030 ' + ':zzz:\u2003140 :x:\n150 runs\u200a\u2003\u2007\u20076 ' + ':white_check_mark:\u20030 :zzz:\u2003144 :x:\n\nResults for commit ' + 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02NSw6AIAwFr' '0JYu9BEjPEyhqDExg+mwMp4dysfZdeZ175eXMM2Wz6wpmLcenARhCCcPEoH5iDRizen0I' 'W47TKN1itFqhArnCTqT2gJWzj61YxoMC2hP+LLDGVl5L8x8FfYZlP2KbPv4AjSxOwi+f0' @@ -3675,29 +3639,11 @@ 'output': { 'title': '140 fail, 6 pass in 14m 11s', 'summary': - '\u205f\u2004\u205f\u20041 files\u2004\u2003155 suites\u2004\u2003\u2002' - '14m 11s ' - '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu' - 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n' - '146 tests\u2003\u205f\u2004\u205f\u20046 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '140 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n150 runs\u2006\u2003\u205f\u2004\u205f\u2004' - '6 ' - '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te' - 
'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003' - '0 ' - '[:zzz:](https://github.com/step-security/publish-unit-test-result-act' - 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003' - '144 ' - '[:x:](https://github.com/step-security/publish-unit-test-result-actio' - 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for ' - 'commit commit s.\n\n' + '\u2007\u20071 files\u2004\u2003155 suites\u2004\u2003\u200214m 11s ' + ':stopwatch:\n146 tests\u2003\u2007\u20076 :white_check_mark:\u20030 ' + ':zzz:\u2003140 :x:\n150 runs\u200a\u2003\u2007\u20076 ' + ':white_check_mark:\u20030 :zzz:\u2003144 :x:\n\nResults for commit ' + 'commit s.\n\n' '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02NSw6AIAwFr' '0JYu9BEjPEyhqDExg+mwMp4dysfZdeZ175eXMM2Wz6wpmLcenARhCCcPEoH5iDRizen0I' 'W47TKN1itFqhArnCTqT2gJWzj61YxoMC2hP+LLDGVl5L8x8FfYZlP2KbPv4AjSxOwi+f0' diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue44527.results.json b/python/test/files/nunit/nunit3/jenkins/NUnit-issue44527.results.json new file mode 100644 index 00000000..1d02ba5a --- /dev/null +++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue44527.results.json @@ -0,0 +1,1313 @@ +{ + "title": "140 fail, 6 pass in 14m 11s", + "summary": "  1 files  155 suites   14m 11s :stopwatch:\n146 tests   6 :white_check_mark: 0 :zzz: 140 :x:\n150 runs    6 :white_check_mark: 0 :zzz: 144 :x:\n\nResults for commit commit s.\n", + "conclusion": "failure", + "stats": { + "files": 1, + "errors": [], + "suites": 155, + "duration": 851, + "tests": 146, + "tests_succ": 6, + "tests_skip": 0, + "tests_fail": 140, + "tests_error": 0, + "runs": 150, + "runs_succ": 6, + "runs_skip": 0, + "runs_fail": 144, + "runs_error": 0, + "commit": "commit sha" + }, + "annotations": [ + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 31s]", + 
"title": "UI_MyTask_MR_Grid_GridViewValidation(True,\"chrome\",\"/#/tasks/access-certification/overview\") failed", + "raw_details": "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such element: Unable to locate element: {\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}\n (Session info: chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 (62ebf098771772160f391d75e589dc567915b233),platform=Windows NT 6.3.9600 x86_64) (WARNING: The server did not provide any stacktrace information)\nCommand duration or timeout: 0 milliseconds\nFor documentation on this error, please visit: http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 -0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: '6.3', java.version: '1.8.0_66'\nDriver info: org.openqa.selenium.chrome.ChromeDriver\nCapabilities [{applicationCacheEnabled=false, rotatable=false, mobileEmulationEnabled=false, networkConnectionEnabled=false, chrome={chromedriverVersion=2.29.461591 (62ebf098771772160f391d75e589dc567915b233), userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir4620_21443}, takesHeapSnapshot=true, pageLoadStrategy=normal, databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, version=58.0.3029.110, platform=WIN8_1, browserConnectionEnabled=false, nativeEvents=true, acceptSslCerts=true, locationContextEnabled=true, webStorageEnabled=true, browserName=chrome, takesScreenshot=true, javascriptEnabled=true, cssSelectorsEnabled=true, unexpectedAlertBehaviour=}]\nSession ID: 92de3e3859b5e9d2cb692461ba367ced\n*** Element info: {Using=xpath, value=//span[@translate='_Loading_']}\n at OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(String exceptionMessage, Exception lastException)\n at 
OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks._My_Requests.Grid.GridValidation.UI_MyTask_MR_Grid_GridViewValidation(Boolean excute, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\My Requests\\Grid\\GridValidation.cs:line 29\n--NoSuchElementException\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 30s]", + "title": "All 2 runs failed: UI_MyTask_MR_Grid_Paging(True,\"chrome\",\"/#/tasks/access-certification/overview\")", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in 
C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks._My_Requests.Grid.GridValidation.UI_MyTask_MR_Grid_Paging(Boolean excute, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\My Requests\\Grid\\GridValidation.cs:line 65" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 30s]", + "title": "UI_MyTasks_MR_Paging_ShowPerPage(True,\"chrome\",\"/#/tasks/access-request/overview\") failed", + "raw_details": "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such element: Unable to locate element: {\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}\n (Session info: chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 (62ebf098771772160f391d75e589dc567915b233),platform=Windows NT 6.3.9600 x86_64) (WARNING: The server did not provide any stacktrace information)\nCommand duration or timeout: 0 milliseconds\nFor documentation on this error, please visit: http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 -0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: '6.3', java.version: '1.8.0_66'\nDriver info: org.openqa.selenium.chrome.ChromeDriver\nCapabilities [{applicationCacheEnabled=false, rotatable=false, mobileEmulationEnabled=false, networkConnectionEnabled=false, chrome={chromedriverVersion=2.29.461591 (62ebf098771772160f391d75e589dc567915b233), userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir10904_14349}, takesHeapSnapshot=true, pageLoadStrategy=normal, databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, version=58.0.3029.110, platform=WIN8_1, 
browserConnectionEnabled=false, nativeEvents=true, acceptSslCerts=true, locationContextEnabled=true, webStorageEnabled=true, browserName=chrome, takesScreenshot=true, javascriptEnabled=true, cssSelectorsEnabled=true, unexpectedAlertBehaviour=}]\nSession ID: 8d83262a43a60462d9eaed6fd8eec81c\n*** Element info: {Using=xpath, value=//span[@translate='_Loading_']}\n at OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(String exceptionMessage, Exception lastException)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks._My_Requests.Paging.PagingValidation.UI_MyTasks_MR_Paging_ShowPerPage(Boolean excute, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\My Requests\\Paging\\PagingValidation.cs:line 30\n--NoSuchElementException\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_OE_Grid_GridViewValidation(True,\"chrome\",\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response 
errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks._Owner_Election.Grid.GridValidation.UI_MyTask_OE_Grid_GridViewValidation(Boolean excute, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Owner Election\\Grid\\GridValidation.cs:line 28" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTasks_OE_ViewOwnersElection(\"/#/tasks/owners-election/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.OwnersElectionPage.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\OwnersElectionPage.cs:line 175\n at MyCompanyUiSettings.Bl.OwnersElectionPage..ctor(IWebDriver driver) in 
C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\OwnersElectionPage.cs:line 145\n at MyCompanyUiSettings.Tl.My_Tasks._Owner_Election.Paging.PagingValidation.UI_MyTasks_OE_ViewOwnersElection(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Owner Election\\Paging\\PagingValidation.cs:line 40" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 30s]", + "title": "All 2 runs failed: UI_MyTask_MR_Progress_Approve(True,\"chrome\",\"/#/tasks/access-certification/overview\")", + "raw_details": "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such element: Unable to locate element: {\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}\n (Session info: chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 (62ebf098771772160f391d75e589dc567915b233),platform=Windows NT 6.3.9600 x86_64) (WARNING: The server did not provide any stacktrace information)\nCommand duration or timeout: 0 milliseconds\nFor documentation on this error, please visit: http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 -0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: '6.3', java.version: '1.8.0_66'\nDriver info: org.openqa.selenium.chrome.ChromeDriver\nCapabilities [{applicationCacheEnabled=false, rotatable=false, mobileEmulationEnabled=false, networkConnectionEnabled=false, chrome={chromedriverVersion=2.29.461591 (62ebf098771772160f391d75e589dc567915b233), userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir12612_29006}, takesHeapSnapshot=true, pageLoadStrategy=normal, databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, version=58.0.3029.110, 
platform=WIN8_1, browserConnectionEnabled=false, nativeEvents=true, acceptSslCerts=true, locationContextEnabled=true, webStorageEnabled=true, browserName=chrome, takesScreenshot=true, javascriptEnabled=true, cssSelectorsEnabled=true, unexpectedAlertBehaviour=}]\nSession ID: d099a8dab51ddac1ad57f17fd01208dc\n*** Element info: {Using=xpath, value=//span[@translate='_Loading_']}\n at OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(String exceptionMessage, Exception lastException)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks._Owner_Election.Progress.ProgressValidation.UI_MyTask_MR_Progress_Approve(Boolean excute, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Owner Election\\Progress\\ProgressValidation.cs:line 26\n--NoSuchElementException\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "All 2 runs failed: UI_MyTask_MR_Progress_Reject(True,\"chrome\",\"/#/tasks/access-certification/overview\")", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at 
OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks._Owner_Election.Progress.ProgressValidation.UI_MyTask_MR_Progress_Reject(Boolean excute, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Owner Election\\Progress\\ProgressValidation.cs:line 74" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_BulkActions_ApproveAll_AddCommentYes_TC2689(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at 
MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Bulk_Acions.Approve_All.ApproveAll.UI_MyTask_AC_ACIS_BulkActions_ApproveAll_AddCommentYes_TC2689(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Bulk Acions\\Approve All\\ApproveAll.cs:line 29" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_BulkActions_ApproveAll_CommittedRecoredNotAffected_TC2691(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Bulk_Acions.Approve_All.ApproveAll.UI_MyTask_AC_ACIS_BulkActions_ApproveAll_CommittedRecoredNotAffected_TC2691(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Bulk Acions\\Approve All\\ApproveAll.cs:line 75" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml 
[took 0s]", + "title": "UI_MyTask_AC_ACIS_BulkActions_ApproveAll_WithExistingSaved_TC2690(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Bulk_Acions.Approve_All.ApproveAll.UI_MyTask_AC_ACIS_BulkActions_ApproveAll_WithExistingSaved_TC2690(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Bulk Acions\\Approve All\\ApproveAll.cs:line 47" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 11s]", + "title": "UI_MyTask_AC_ACIS_BulkActions_Browsers_Chrome_TC2692(\"/#/tasks/access-certification/overview\") failed", + "raw_details": " Expected: True\n But was: False\nat MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Bulk_Acions.Browsers.Browsers.UI_MyTask_AC_ACIS_BulkActions_Browsers_Chrome_TC2692(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Bulk Acions\\Browsers\\Browsers.cs:line 
41" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 6s]", + "title": "UI_MyTask_AC_ACIS_BulkActions_ClearAll_ApproveAllNo_TC2707(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.Exception : Base Class - Click(string xpath) method threw an exception : \nunknown error: Element is not clickable at point (80, 241). Other element would receive the click:
...
\n (Session info: chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 (62ebf098771772160f391d75e589dc567915b233),platform=Windows NT 6.3.9600 x86_64) (WARNING: The server did not provide any stacktrace information)\nCommand duration or timeout: 50 milliseconds\nBuild info: version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 -0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: '6.3', java.version: '1.8.0_66'\nDriver info: org.openqa.selenium.chrome.ChromeDriver\nCapabilities [{applicationCacheEnabled=false, rotatable=false, mobileEmulationEnabled=false, networkConnectionEnabled=false, chrome={chromedriverVersion=2.29.461591 (62ebf098771772160f391d75e589dc567915b233), userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir2476_1158}, takesHeapSnapshot=true, pageLoadStrategy=normal, databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, version=58.0.3029.110, platform=WIN8_1, browserConnectionEnabled=false, nativeEvents=true, acceptSslCerts=true, locationContextEnabled=true, webStorageEnabled=true, browserName=chrome, takesScreenshot=true, javascriptEnabled=true, cssSelectorsEnabled=true, unexpectedAlertBehaviour=}]\nSession ID: 5cb1002259d4ed7ed523ba2e9e0cea02\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebElement.Click()\n at MyCompanyUiSettings.Bl.Base.Click(String xpath) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 323\n at MyCompanyUiSettings.Bl.Base.Click(String xpath) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 330\n at 
MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Bulk_Acions.Clear_All.ClearAll.UI_MyTask_AC_ACIS_BulkActions_ClearAll_ApproveAllNo_TC2707(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Bulk Acions\\Clear All\\ClearAll.cs:line 90" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_BulkActions_ClearAll_CommittedRecoredNotAffected_TC2708(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Bulk_Acions.Clear_All.ClearAll.UI_MyTask_AC_ACIS_BulkActions_ClearAll_CommittedRecoredNotAffected_TC2708(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Bulk Acions\\Clear All\\ClearAll.cs:line 102" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": 
"UI_MyTask_AC_ACIS_BulkActions_RejectAll_AddCommentNo_TC2705(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Bulk_Acions.Clear_All.ClearAll.UI_MyTask_AC_ACIS_BulkActions_RejectAll_AddCommentNo_TC2705(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Bulk Acions\\Clear All\\ClearAll.cs:line 13" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_BulkActions_RejectAll_AddCommentYes_TC2706(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String 
value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Bulk_Acions.Clear_All.ClearAll.UI_MyTask_AC_ACIS_BulkActions_RejectAll_AddCommentYes_TC2706(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Bulk Acions\\Clear All\\ClearAll.cs:line 32" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_BulkActions_ExamineTextAndLayout_ClearAllSelection_TC2712(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Bulk_Acions.Examine_Text_and_Layout.ExaminTextAndLayout.UI_MyTask_AC_ACIS_BulkActions_ExamineTextAndLayout_ClearAllSelection_TC2712(String 
url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Bulk Acions\\Examine Text and Layout\\ExaminTextAndLayout.cs:line 67" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_BulkActions_ExamineTextAndLayout_MainMenu_TC2709(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Bulk_Acions.Examine_Text_and_Layout.ExaminTextAndLayout.UI_MyTask_AC_ACIS_BulkActions_ExamineTextAndLayout_MainMenu_TC2709(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Bulk Acions\\Examine Text and Layout\\ExaminTextAndLayout.cs:line 15" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_BulkActions_ExamineTextAndLayout_RejectAllSelection_TC2711(\"/#/tasks/access-certification/overview\") 
failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Bulk_Acions.Examine_Text_and_Layout.ExaminTextAndLayout.UI_MyTask_AC_ACIS_BulkActions_ExamineTextAndLayout_RejectAllSelection_TC2711(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Bulk Acions\\Examine Text and Layout\\ExaminTextAndLayout.cs:line 50" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 35s]", + "title": "UI_MyTask_AC_ACIS_BulkActions_ExamineTextAndLayout_MainMenu_TC2713(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.Exception : Base Class - FindElementsOnPage(string xpath) - 1 parameter - method threw an exception : \nTimed out after 30 seconds\n at OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(String exceptionMessage, Exception lastException)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.WaitForVisibleElement(String xpath) in 
C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 297\n at MyCompanyUiSettings.Bl.Base.FindElementsOnPage(String xpath) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 247\n at MyCompanyUiSettings.Bl.Base.FindElementsOnPage(String xpath) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 253\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Bulk_Acions.Load.Load.UI_MyTask_AC_ACIS_BulkActions_ExamineTextAndLayout_MainMenu_TC2713(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Bulk Acions\\Load\\Load.cs:line 15" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_BulkActions_ExamineTextAndLayout_MainMenu_TC2714(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at 
MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Bulk_Acions.Load.Load.UI_MyTask_AC_ACIS_BulkActions_ExamineTextAndLayout_MainMenu_TC2714(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Bulk Acions\\Load\\Load.cs:line 34" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 35s]", + "title": "UI_MyTask_AC_ACIS_BulkActions_RejectAll_AddCommentNo_TC2715(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.Exception : Base Class - FindElementsOnPage(string xpath) - 1 parameter - method threw an exception : \nTimed out after 30 seconds\n at OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(String exceptionMessage, Exception lastException)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.WaitForVisibleElement(String xpath) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 297\n at MyCompanyUiSettings.Bl.Base.FindElementsOnPage(String xpath) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 247\n at MyCompanyUiSettings.Bl.Base.FindElementsOnPage(String xpath) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 253\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Bulk_Acions.Reject_All.ApproveAll.UI_MyTask_AC_ACIS_BulkActions_RejectAll_AddCommentNo_TC2715(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Bulk Acions\\Reject All\\RejectAll.cs:line 14" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": 
"nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_BulkActions_RejectAll_AddCommentYes_TC2716(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Bulk_Acions.Reject_All.ApproveAll.UI_MyTask_AC_ACIS_BulkActions_RejectAll_AddCommentYes_TC2716(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Bulk Acions\\Reject All\\RejectAll.cs:line 29" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_BulkActions_RejectAll_CommittedRecoredNotAffected_TC2718(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at 
OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Bulk_Acions.Reject_All.ApproveAll.UI_MyTask_AC_ACIS_BulkActions_RejectAll_CommittedRecoredNotAffected_TC2718(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Bulk Acions\\Reject All\\RejectAll.cs:line 75" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_BulkActions_RejectAll_WithExistingSaved_TC2717(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at 
MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Bulk_Acions.Reject_All.ApproveAll.UI_MyTask_AC_ACIS_BulkActions_RejectAll_WithExistingSaved_TC2717(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Bulk Acions\\Reject All\\RejectAll.cs:line 47" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_BulkActions_Saving_Saving_IsSynchronous_NoOtherActionCanBeTaken_2722(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Bulk_Acions.Saving.Saving.UI_MyTask_AC_ACIS_BulkActions_Saving_Saving_IsSynchronous_NoOtherActionCanBeTaken_2722(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Bulk Acions\\Saving\\Saving.cs:line 27" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": 
"nunit3/jenkins/NUnit-issue44527.xml [took 4s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_EntireColumApproveAll_TC2741(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : unknown error: Element is not clickable at point (932, 731)\n (Session info: chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 (62ebf098771772160f391d75e589dc567915b233),platform=Windows NT 6.3.9600 x86_64) (WARNING: The server did not provide any stacktrace information)\nCommand duration or timeout: 65 milliseconds\nBuild info: version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 -0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: '6.3', java.version: '1.8.0_66'\nDriver info: org.openqa.selenium.chrome.ChromeDriver\nCapabilities [{applicationCacheEnabled=false, rotatable=false, mobileEmulationEnabled=false, networkConnectionEnabled=false, chrome={chromedriverVersion=2.29.461591 (62ebf098771772160f391d75e589dc567915b233), userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir4700_14237}, takesHeapSnapshot=true, pageLoadStrategy=normal, databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, version=58.0.3029.110, platform=WIN8_1, browserConnectionEnabled=false, nativeEvents=true, acceptSslCerts=true, locationContextEnabled=true, webStorageEnabled=true, browserName=chrome, takesScreenshot=true, javascriptEnabled=true, cssSelectorsEnabled=true, unexpectedAlertBehaviour=}]\nSession ID: 0501eda8a3e393ab97da9ab3839ea770\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebElement.Click()\n at 
MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Chart_Menu.Approve_All.Graph.ApproveAllGraph.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_EntireColumApproveAll_TC2741(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Chart Menu\\Approve All\\Graph\\ApproveAllGraph.cs:line 15" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingApprovedApproveAll_TC11159(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Chart_Menu.Approve_All.Graph.ApproveAllGraph.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingApprovedApproveAll_TC11159(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Chart Menu\\Approve All\\Graph\\ApproveAllGraph.cs:line 65" + }, + { + "path": "/", + "start_line": 
0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingForActionApproveAll_TC2744(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Chart_Menu.Approve_All.Graph.ApproveAllGraph.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingForActionApproveAll_TC2744(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Chart Menu\\Approve All\\Graph\\ApproveAllGraph.cs:line 39" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingRejectApproveAll_TC11160(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at 
OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Chart_Menu.Approve_All.Graph.ApproveAllGraph.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingRejectApproveAll_TC11160(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Chart Menu\\Approve All\\Graph\\ApproveAllGraph.cs:line 93" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 4s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_EntireColumClearAll_TC2749(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : unknown error: Element is not clickable at point (932, 731)\n (Session info: chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 (62ebf098771772160f391d75e589dc567915b233),platform=Windows NT 6.3.9600 x86_64) (WARNING: The server did not provide any stacktrace information)\nCommand duration or timeout: 66 milliseconds\nBuild info: version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 -0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: '6.3', java.version: '1.8.0_66'\nDriver info: 
org.openqa.selenium.chrome.ChromeDriver\nCapabilities [{applicationCacheEnabled=false, rotatable=false, mobileEmulationEnabled=false, networkConnectionEnabled=false, chrome={chromedriverVersion=2.29.461591 (62ebf098771772160f391d75e589dc567915b233), userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir6552_28403}, takesHeapSnapshot=true, pageLoadStrategy=normal, databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, version=58.0.3029.110, platform=WIN8_1, browserConnectionEnabled=false, nativeEvents=true, acceptSslCerts=true, locationContextEnabled=true, webStorageEnabled=true, browserName=chrome, takesScreenshot=true, javascriptEnabled=true, cssSelectorsEnabled=true, unexpectedAlertBehaviour=}]\nSession ID: 5646c3ae0ba7663483cda0a3894fe2a9\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebElement.Click()\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Chart_Menu.Clear_All.Graph.ClearAllGraph.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_EntireColumClearAll_TC2749(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Chart Menu\\Clear All\\Graph\\ClearAllGraph.cs:line 15" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingApprovedClearAll_TC2750(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at 
OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Chart_Menu.Clear_All.Graph.ClearAllGraph.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingApprovedClearAll_TC2750(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Chart Menu\\Clear All\\Graph\\ClearAllGraph.cs:line 46" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingForActionClearAll_TC2752(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at 
MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Chart_Menu.Clear_All.Graph.ClearAllGraph.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingForActionClearAll_TC2752(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Chart Menu\\Clear All\\Graph\\ClearAllGraph.cs:line 112" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingRejectedClearAll_TC2751(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Chart_Menu.Clear_All.Graph.ClearAllGraph.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingRejectedClearAll_TC2751(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification 
Inner Screen\\Chart View\\Chart Menu\\Clear All\\Graph\\ClearAllGraph.cs:line 79" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 4s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_ColumnFiltering_FilterFurtherBy_FilterBy_AllPossibleFields_TC2771(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : unknown error: Element is not clickable at point (932, 731)\n (Session info: chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 (62ebf098771772160f391d75e589dc567915b233),platform=Windows NT 6.3.9600 x86_64) (WARNING: The server did not provide any stacktrace information)\nCommand duration or timeout: 61 milliseconds\nBuild info: version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 -0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: '6.3', java.version: '1.8.0_66'\nDriver info: org.openqa.selenium.chrome.ChromeDriver\nCapabilities [{applicationCacheEnabled=false, rotatable=false, mobileEmulationEnabled=false, networkConnectionEnabled=false, chrome={chromedriverVersion=2.29.461591 (62ebf098771772160f391d75e589dc567915b233), userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir32_9833}, takesHeapSnapshot=true, pageLoadStrategy=normal, databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, version=58.0.3029.110, platform=WIN8_1, browserConnectionEnabled=false, nativeEvents=true, acceptSslCerts=true, locationContextEnabled=true, webStorageEnabled=true, browserName=chrome, takesScreenshot=true, javascriptEnabled=true, cssSelectorsEnabled=true, unexpectedAlertBehaviour=}]\nSession ID: 258bbe17298009e5e47efcf485ebccd3\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 
parameters)\n at OpenQA.Selenium.Remote.RemoteWebElement.Click()\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Chart_Menu.Column_Filtering.Filter_further_by.Filter_By.FilterBy.UI_MyTask_AC_ACIS_ChartView_ChartMenu_ColumnFiltering_FilterFurtherBy_FilterBy_AllPossibleFields_TC2771(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Chart Menu\\Column Filtering\\Filter further by\\Filter By\\FilterBy.cs:line 106" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_ColumnFiltering_FilterFurtherBy_FilterBy_OneColumnOutOfManyWithAlreadyExistingFilters_TC2768(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at 
MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Chart_Menu.Column_Filtering.Filter_further_by.Filter_By.FilterBy.UI_MyTask_AC_ACIS_ChartView_ChartMenu_ColumnFiltering_FilterFurtherBy_FilterBy_OneColumnOutOfManyWithAlreadyExistingFilters_TC2768(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Chart Menu\\Column Filtering\\Filter further by\\Filter By\\FilterBy.cs:line 54" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_ColumnFiltering_FilterFurtherBy_FilterBy_OneColumnOutOfManyWithNoExistingFilters_TC2767(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Chart_Menu.Column_Filtering.Filter_further_by.Filter_By.FilterBy.UI_MyTask_AC_ACIS_ChartView_ChartMenu_ColumnFiltering_FilterFurtherBy_FilterBy_OneColumnOutOfManyWithNoExistingFilters_TC2767(String url) in 
C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Chart Menu\\Column Filtering\\Filter further by\\Filter By\\FilterBy.cs:line 13" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 4s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_EntireColum_WithOnlyPendingForAction_TC2753(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : unknown error: Element is not clickable at point (932, 731)\n (Session info: chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 (62ebf098771772160f391d75e589dc567915b233),platform=Windows NT 6.3.9600 x86_64) (WARNING: The server did not provide any stacktrace information)\nCommand duration or timeout: 65 milliseconds\nBuild info: version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 -0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: '6.3', java.version: '1.8.0_66'\nDriver info: org.openqa.selenium.chrome.ChromeDriver\nCapabilities [{applicationCacheEnabled=false, rotatable=false, mobileEmulationEnabled=false, networkConnectionEnabled=false, chrome={chromedriverVersion=2.29.461591 (62ebf098771772160f391d75e589dc567915b233), userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir2992_31686}, takesHeapSnapshot=true, pageLoadStrategy=normal, databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, version=58.0.3029.110, platform=WIN8_1, browserConnectionEnabled=false, nativeEvents=true, acceptSslCerts=true, locationContextEnabled=true, webStorageEnabled=true, browserName=chrome, takesScreenshot=true, javascriptEnabled=true, cssSelectorsEnabled=true, unexpectedAlertBehaviour=}]\nSession ID: 8397ed2522698ddccb6b0aa573d920e9\n at 
OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebElement.Click()\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Chart_Menu.Entire_Column.EntireColumn.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_EntireColum_WithOnlyPendingForAction_TC2753(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Chart Menu\\Entire Column\\EntireColumn.cs:line 16" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_EntireColum_WithPendingForActionAndUncommittedApproved_TC2754(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at 
MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Chart_Menu.Entire_Column.EntireColumn.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_EntireColum_WithPendingForActionAndUncommittedApproved_TC2754(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Chart Menu\\Entire Column\\EntireColumn.cs:line 57" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_EntireColum_WithPendingForActionAndUncommittedReject_TC2755(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Chart_Menu.Entire_Column.EntireColumn.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_EntireColum_WithPendingForActionAndUncommittedReject_TC2755(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Chart Menu\\Entire 
Column\\EntireColumn.cs:line 83" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_WithPendingForActionApprovedCommittedAndRejectedCommitted_TC2758(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Chart_Menu.Entire_Column.EntireColumn.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_WithPendingForActionApprovedCommittedAndRejectedCommitted_TC2758(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Chart Menu\\Entire Column\\EntireColumn.cs:line 148" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_WithUncommittedApprovedAndUncommittedReject_TC2756(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not 
available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Chart_Menu.Entire_Column.EntireColumn.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_WithUncommittedApprovedAndUncommittedReject_TC2756(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Chart Menu\\Entire Column\\EntireColumn.cs:line 111" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 4s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_Menus_ColumnEntireColumn_TC7937_TC7927(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : unknown error: Element is not clickable at point (932, 731)\n (Session info: chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 (62ebf098771772160f391d75e589dc567915b233),platform=Windows NT 6.3.9600 x86_64) (WARNING: The server did not provide any stacktrace information)\nCommand duration or timeout: 67 milliseconds\nBuild info: version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 -0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server 2012 R2', 
os.arch: 'x86', os.version: '6.3', java.version: '1.8.0_66'\nDriver info: org.openqa.selenium.chrome.ChromeDriver\nCapabilities [{applicationCacheEnabled=false, rotatable=false, mobileEmulationEnabled=false, networkConnectionEnabled=false, chrome={chromedriverVersion=2.29.461591 (62ebf098771772160f391d75e589dc567915b233), userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir2696_14836}, takesHeapSnapshot=true, pageLoadStrategy=normal, databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, version=58.0.3029.110, platform=WIN8_1, browserConnectionEnabled=false, nativeEvents=true, acceptSslCerts=true, locationContextEnabled=true, webStorageEnabled=true, browserName=chrome, takesScreenshot=true, javascriptEnabled=true, cssSelectorsEnabled=true, unexpectedAlertBehaviour=}]\nSession ID: 6a683eff25d0c058e04394158f5d2245\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebElement.Click()\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Chart_Menu.Menus.Menus.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Menus_ColumnEntireColumn_TC7937_TC7927(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Chart Menu\\Menus\\Menus.cs:line 58" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_Menus_ColumnPendingAcions_TC7938(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at 
OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Chart_Menu.Menus.Menus.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Menus_ColumnPendingAcions_TC7938(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Chart Menu\\Menus\\Menus.cs:line 100" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_Menus_FilterFurtherBy_TC7939(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in 
C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Chart_Menu.Menus.Menus.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Menus_FilterFurtherBy_TC7939(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Chart Menu\\Menus\\Menus.cs:line 140" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_Menus_TakeActionOn_TC7936(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Chart_Menu.Menus.Menus.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Menus_TakeActionOn_TC7936(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Chart Menu\\Menus\\Menus.cs:line 13" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + 
"annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 4s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_EntireColumRejectAll_TC2763(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : unknown error: Element is not clickable at point (932, 731)\n (Session info: chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 (62ebf098771772160f391d75e589dc567915b233),platform=Windows NT 6.3.9600 x86_64) (WARNING: The server did not provide any stacktrace information)\nCommand duration or timeout: 60 milliseconds\nBuild info: version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 -0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: '6.3', java.version: '1.8.0_66'\nDriver info: org.openqa.selenium.chrome.ChromeDriver\nCapabilities [{applicationCacheEnabled=false, rotatable=false, mobileEmulationEnabled=false, networkConnectionEnabled=false, chrome={chromedriverVersion=2.29.461591 (62ebf098771772160f391d75e589dc567915b233), userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir10404_20818}, takesHeapSnapshot=true, pageLoadStrategy=normal, databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, version=58.0.3029.110, platform=WIN8_1, browserConnectionEnabled=false, nativeEvents=true, acceptSslCerts=true, locationContextEnabled=true, webStorageEnabled=true, browserName=chrome, takesScreenshot=true, javascriptEnabled=true, cssSelectorsEnabled=true, unexpectedAlertBehaviour=}]\nSession ID: 50f2dfc6d36fd64051d143d025dc8e53\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebElement.Click()\n at 
MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Chart_Menu.Reject_All.Graph.RejectAllGraph.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_EntireColumRejectAll_TC2763(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Chart Menu\\Reject All\\Graph\\RejectAllGraph.cs:line 15" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingApprovedRejectAll_TC2765(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Chart_Menu.Reject_All.Graph.RejectAllGraph.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingApprovedRejectAll_TC2765(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Chart Menu\\Reject All\\Graph\\RejectAllGraph.cs:line 65" + }, + { + "path": "/", + "start_line": 0, + 
"end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingForActionRejectAll_TC2764(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Chart_Menu.Reject_All.Graph.RejectAllGraph.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingForActionRejectAll_TC2764(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Chart Menu\\Reject All\\Graph\\RejectAllGraph.cs:line 39" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingRejectRejectAll_TC2766(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at 
OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Chart_Menu.Reject_All.Graph.RejectAllGraph.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingRejectRejectAll_TC2766(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Chart Menu\\Reject All\\Graph\\RejectAllGraph.cs:line 93" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 4s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_LookAndFeel_AllColumnsAvailable_TC2793(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : unknown error: Element is not clickable at point (932, 731)\n (Session info: chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 (62ebf098771772160f391d75e589dc567915b233),platform=Windows NT 6.3.9600 x86_64) (WARNING: The server did not provide any stacktrace information)\nCommand duration or timeout: 61 milliseconds\nBuild info: version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 -0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: '6.3', java.version: '1.8.0_66'\nDriver info: 
org.openqa.selenium.chrome.ChromeDriver\nCapabilities [{applicationCacheEnabled=false, rotatable=false, mobileEmulationEnabled=false, networkConnectionEnabled=false, chrome={chromedriverVersion=2.29.461591 (62ebf098771772160f391d75e589dc567915b233), userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir3796_31836}, takesHeapSnapshot=true, pageLoadStrategy=normal, databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, version=58.0.3029.110, platform=WIN8_1, browserConnectionEnabled=false, nativeEvents=true, acceptSslCerts=true, locationContextEnabled=true, webStorageEnabled=true, browserName=chrome, takesScreenshot=true, javascriptEnabled=true, cssSelectorsEnabled=true, unexpectedAlertBehaviour=}]\nSession ID: 840df673591317f43b8304ab9db74078\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebElement.Click()\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Look_And_Feel.LookAndFeel.UI_MyTask_AC_ACIS_ChartView_ChartMenu_LookAndFeel_AllColumnsAvailable_TC2793(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Look And Feel\\LookAndFeel.cs:line 16" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_LookAndFeel_LongNameGetsTC2795(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at 
OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Look_And_Feel.LookAndFeel.UI_MyTask_AC_ACIS_ChartView_ChartMenu_LookAndFeel_LongNameGetsTC2795(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Look And Feel\\LookAndFeel.cs:line 55" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_LookAndFeel_TableViewTC2799(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at 
MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Look_And_Feel.LookAndFeel.UI_MyTask_AC_ACIS_ChartView_ChartMenu_LookAndFeel_TableViewTC2799(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Look And Feel\\LookAndFeel.cs:line 99" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_LookAndFeel_TextAndColorsTC2794(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Look_And_Feel.LookAndFeel.UI_MyTask_AC_ACIS_ChartView_ChartMenu_LookAndFeel_TextAndColorsTC2794(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Look And Feel\\LookAndFeel.cs:line 34" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml 
[took 0s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_LookAndFeel_ToolTipTC2796_TC2772(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Look_And_Feel.LookAndFeel.UI_MyTask_AC_ACIS_ChartView_ChartMenu_LookAndFeel_ToolTipTC2796_TC2772(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Look And Feel\\LookAndFeel.cs:line 75" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_ACIS_ChartView_ChartMenu_LookAndFeel_ToolTipTC7926(\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at 
OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certification_Inner_Screen.Chart_View.Look_And_Feel.LookAndFeel.UI_MyTask_AC_ACIS_ChartView_ChartMenu_LookAndFeel_ToolTipTC7926(String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Certification\\Access Certification Inner Screen\\Chart View\\Look And Feel\\LookAndFeel.cs:line 121" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_FiltersValidation(True,\"chrome\",\"/#/tasks/access-certification/overview\") failed", + "raw_details": "OneTimeSetUp: No suitable constructor was found" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_DataGrid_DataDisplay_TC2101(True,\"chrome\",\"/#/tasks/access-certification/overview\") failed", + "raw_details": "OneTimeSetUp: No suitable constructor was found" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_DataGrid_Header_TC2100(True,\"chrome\",\"/#/tasks/access-certification/overview\") failed", + "raw_details": "OneTimeSetUp: No suitable constructor was found" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": 
"UI_MyTask_AC_DataGrid_Navigation_TC2099(True,\"chrome\",\"/#/tasks/access-certification/overview\") failed", + "raw_details": "OneTimeSetUp: No suitable constructor was found" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_DataGrid_Paging_TC2102(True,\"chrome\",\"/#/tasks/access-certification/overview\") failed", + "raw_details": "OneTimeSetUp: No suitable constructor was found" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_Grid_Grid_Validation(True,\"chrome\",\"#/tasks/access-certification/overview\") failed", + "raw_details": "OneTimeSetUp: No suitable constructor was found" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_TwoUsersTwoApplicationsValidation(True,\"chrome\",\"/#/tasks/access-certification/overview\") failed", + "raw_details": "OneTimeSetUp: No suitable constructor was found" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 31s]", + "title": "All 2 runs failed: UI_MyTasks_AR_Paging_ShowPerPage(True,\"chrome\",\"/#/tasks/access-request/overview\")", + "raw_details": "OneTimeSetUp: No suitable constructor was found" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_Progress_Approve(True,\"chrome\",\"/#/tasks/access-certification/overview\") failed", + "raw_details": "OneTimeSetUp: No suitable constructor was found" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": 
"nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AC_Progress_Reject(True,\"chrome\",\"/#/tasks/access-certification/overview\") failed", + "raw_details": "OneTimeSetUp: No suitable constructor was found" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 30s]", + "title": "UI_MyTask_AR_Filters_FilterLayout(True,\"chrome\",\"/#/tasks/access-certification/overview\") failed", + "raw_details": "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such element: Unable to locate element: {\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}\n (Session info: chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 (62ebf098771772160f391d75e589dc567915b233),platform=Windows NT 6.3.9600 x86_64) (WARNING: The server did not provide any stacktrace information)\nCommand duration or timeout: 15 milliseconds\nFor documentation on this error, please visit: http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 -0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: '6.3', java.version: '1.8.0_66'\nDriver info: org.openqa.selenium.chrome.ChromeDriver\nCapabilities [{applicationCacheEnabled=false, rotatable=false, mobileEmulationEnabled=false, networkConnectionEnabled=false, chrome={chromedriverVersion=2.29.461591 (62ebf098771772160f391d75e589dc567915b233), userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir11804_16895}, takesHeapSnapshot=true, pageLoadStrategy=normal, databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, version=58.0.3029.110, platform=WIN8_1, browserConnectionEnabled=false, nativeEvents=true, acceptSslCerts=true, locationContextEnabled=true, webStorageEnabled=true, 
browserName=chrome, takesScreenshot=true, javascriptEnabled=true, cssSelectorsEnabled=true, unexpectedAlertBehaviour=}]\nSession ID: 29b4b9836d0675d3828a94e2f11cf9d7\n*** Element info: {Using=xpath, value=//span[@translate='_Loading_']}\n at OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(String exceptionMessage, Exception lastException)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Request.Filters.FiltersValidation.UI_MyTask_AR_Filters_FilterLayout(Boolean excute, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Request\\Filters\\FiltersValidation.cs:line 29\n--NoSuchElementException\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AR_Filters_FiltersFunctionality(True,\"chrome\",\"/#/tasks/access-request/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n 
at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Request.Filters.FiltersValidation.UI_MyTask_AR_Filters_FiltersFunctionality(Boolean excute, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Request\\Filters\\FiltersValidation.cs:line 83" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 30s]", + "title": "UI_MyTask_AR_Grid_FilterVAlidates(True,\"chrome\",\"/#/tasks/access-certification/overview\") failed", + "raw_details": "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such element: Unable to locate element: {\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}\n (Session info: chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 (62ebf098771772160f391d75e589dc567915b233),platform=Windows NT 6.3.9600 x86_64) (WARNING: The server did not provide any stacktrace information)\nCommand duration or timeout: 15 milliseconds\nFor documentation on this error, please visit: http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 -0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: '6.3', java.version: '1.8.0_66'\nDriver info: org.openqa.selenium.chrome.ChromeDriver\nCapabilities [{applicationCacheEnabled=false, rotatable=false, mobileEmulationEnabled=false, 
networkConnectionEnabled=false, chrome={chromedriverVersion=2.29.461591 (62ebf098771772160f391d75e589dc567915b233), userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir12972_27801}, takesHeapSnapshot=true, pageLoadStrategy=normal, databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, version=58.0.3029.110, platform=WIN8_1, browserConnectionEnabled=false, nativeEvents=true, acceptSslCerts=true, locationContextEnabled=true, webStorageEnabled=true, browserName=chrome, takesScreenshot=true, javascriptEnabled=true, cssSelectorsEnabled=true, unexpectedAlertBehaviour=}]\nSession ID: fca88dd0490c464a5ded2f16849929d8\n*** Element info: {Using=xpath, value=//span[@translate='_Loading_']}\n at OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(String exceptionMessage, Exception lastException)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Request.Grid.GridValidation.UI_MyTask_AR_Grid_FilterVAlidates(Boolean excute, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Request\\Grid\\GridValidation.cs:line 29\n--NoSuchElementException\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": 
"UI_MyTask_AR_Grid_Paging(True,\"chrome\",\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Request.Grid.GridValidation.UI_MyTask_AR_Grid_Paging(Boolean excute, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Request\\Grid\\GridValidation.cs:line 65" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 30s]", + "title": "UI_MyTask_AR_Progress_Approve(True,\"chrome\",\"/#/tasks/access-certification/overview\") failed", + "raw_details": "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such element: Unable to locate element: {\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}\n (Session info: chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 (62ebf098771772160f391d75e589dc567915b233),platform=Windows NT 6.3.9600 x86_64) (WARNING: The server did not provide any stacktrace information)\nCommand duration or timeout: 0 milliseconds\nFor documentation on this error, please visit: 
http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 -0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: '6.3', java.version: '1.8.0_66'\nDriver info: org.openqa.selenium.chrome.ChromeDriver\nCapabilities [{applicationCacheEnabled=false, rotatable=false, mobileEmulationEnabled=false, networkConnectionEnabled=false, chrome={chromedriverVersion=2.29.461591 (62ebf098771772160f391d75e589dc567915b233), userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir3688_21557}, takesHeapSnapshot=true, pageLoadStrategy=normal, databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, version=58.0.3029.110, platform=WIN8_1, browserConnectionEnabled=false, nativeEvents=true, acceptSslCerts=true, locationContextEnabled=true, webStorageEnabled=true, browserName=chrome, takesScreenshot=true, javascriptEnabled=true, cssSelectorsEnabled=true, unexpectedAlertBehaviour=}]\nSession ID: fc2e027b336637b143a0098139997621\n*** Element info: {Using=xpath, value=//span[@translate='_Loading_']}\n at OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(String exceptionMessage, Exception lastException)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Request.Progress.ProgressValidation.UI_MyTask_AR_Progress_Approve(Boolean excute, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Request\\Progress\\ProgressValidation.cs:line 32\n--NoSuchElementException\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 
parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_AR_Progress_Reject(True,\"chrome\",\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Access_Request.Progress.ProgressValidation.UI_MyTask_AR_Progress_Reject(Boolean excute, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Access Request\\Progress\\ProgressValidation.cs:line 80" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 31s]", + "title": "UI_MyTask_CC_Filters_FilterVAlidates(True,\"chrome\",\"/#/tasks/access-certification/overview\") failed", + "raw_details": "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 seconds\n ----> 
OpenQA.Selenium.NoSuchElementException : no such element: Unable to locate element: {\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}\n (Session info: chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 (62ebf098771772160f391d75e589dc567915b233),platform=Windows NT 6.3.9600 x86_64) (WARNING: The server did not provide any stacktrace information)\nCommand duration or timeout: 0 milliseconds\nFor documentation on this error, please visit: http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 -0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: '6.3', java.version: '1.8.0_66'\nDriver info: org.openqa.selenium.chrome.ChromeDriver\nCapabilities [{applicationCacheEnabled=false, rotatable=false, mobileEmulationEnabled=false, networkConnectionEnabled=false, chrome={chromedriverVersion=2.29.461591 (62ebf098771772160f391d75e589dc567915b233), userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir13304_30088}, takesHeapSnapshot=true, pageLoadStrategy=normal, databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, version=58.0.3029.110, platform=WIN8_1, browserConnectionEnabled=false, nativeEvents=true, acceptSslCerts=true, locationContextEnabled=true, webStorageEnabled=true, browserName=chrome, takesScreenshot=true, javascriptEnabled=true, cssSelectorsEnabled=true, unexpectedAlertBehaviour=}]\nSession ID: e6e1a454eceffe04daec2df3121843c6\n*** Element info: {Using=xpath, value=//span[@translate='_Loading_']}\n at OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(String exceptionMessage, Exception lastException)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at 
MyCompanyUiSettings.Tl.My_Tasks.Compliance_Control.Grid.GridValidation.UI_MyTask_CC_Filters_FilterVAlidates(Boolean excute, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Compliance Control\\Grid\\GridValidation.cs:line 30\n--NoSuchElementException\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_CC_Grid_Paging(True,\"chrome\",\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Compliance_Control.Grid.GridValidation.UI_MyTask_CC_Grid_Paging(Boolean excute, String browserName, String url) in 
C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Compliance Control\\Grid\\GridValidation.cs:line 66" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 30s]", + "title": "UI_MyTasks_CC_Paging_ShowPerPage(True,\"chrome\",\"/#/tasks/access-request/overview\") failed", + "raw_details": "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such element: Unable to locate element: {\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}\n (Session info: chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 (62ebf098771772160f391d75e589dc567915b233),platform=Windows NT 6.3.9600 x86_64) (WARNING: The server did not provide any stacktrace information)\nCommand duration or timeout: 0 milliseconds\nFor documentation on this error, please visit: http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 -0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: '6.3', java.version: '1.8.0_66'\nDriver info: org.openqa.selenium.chrome.ChromeDriver\nCapabilities [{applicationCacheEnabled=false, rotatable=false, mobileEmulationEnabled=false, networkConnectionEnabled=false, chrome={chromedriverVersion=2.29.461591 (62ebf098771772160f391d75e589dc567915b233), userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir6532_29346}, takesHeapSnapshot=true, pageLoadStrategy=normal, databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, version=58.0.3029.110, platform=WIN8_1, browserConnectionEnabled=false, nativeEvents=true, acceptSslCerts=true, locationContextEnabled=true, webStorageEnabled=true, browserName=chrome, takesScreenshot=true, javascriptEnabled=true, cssSelectorsEnabled=true, 
unexpectedAlertBehaviour=}]\nSession ID: b5311e179a7c4fac0e8285b86e566664\n*** Element info: {Using=xpath, value=//span[@translate='_Loading_']}\n at OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(String exceptionMessage, Exception lastException)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Compliance_Control.Paging.PagingValidation.UI_MyTasks_CC_Paging_ShowPerPage(Boolean excute, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Compliance Control\\Paging\\PagingValidation.cs:line 24\n--NoSuchElementException\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 30s]", + "title": "UI_MyTask_CC_Progress_Approve(True,\"chrome\",\"/#/tasks/access-certification/overview\") failed", + "raw_details": "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such element: Unable to locate element: {\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}\n (Session info: chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 (62ebf098771772160f391d75e589dc567915b233),platform=Windows NT 6.3.9600 x86_64) (WARNING: The server did not provide any stacktrace 
information)\nCommand duration or timeout: 0 milliseconds\nFor documentation on this error, please visit: http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 -0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: '6.3', java.version: '1.8.0_66'\nDriver info: org.openqa.selenium.chrome.ChromeDriver\nCapabilities [{applicationCacheEnabled=false, rotatable=false, mobileEmulationEnabled=false, networkConnectionEnabled=false, chrome={chromedriverVersion=2.29.461591 (62ebf098771772160f391d75e589dc567915b233), userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir12668_24175}, takesHeapSnapshot=true, pageLoadStrategy=normal, databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, version=58.0.3029.110, platform=WIN8_1, browserConnectionEnabled=false, nativeEvents=true, acceptSslCerts=true, locationContextEnabled=true, webStorageEnabled=true, browserName=chrome, takesScreenshot=true, javascriptEnabled=true, cssSelectorsEnabled=true, unexpectedAlertBehaviour=}]\nSession ID: 1a60859e82be5a9504866d8d9e6b21ba\n*** Element info: {Using=xpath, value=//span[@translate='_Loading_']}\n at OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(String exceptionMessage, Exception lastException)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Compliance_Control.Progress.ProgressValidation.UI_MyTask_CC_Progress_Approve(Boolean excute, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Compliance Control\\Progress\\ProgressValidation.cs:line 27\n--NoSuchElementException\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response 
errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_CC_Progress_Reject(True,\"chrome\",\"/#/tasks/access-certification/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks.Compliance_Control.Progress.ProgressValidation.UI_MyTask_CC_Progress_Reject(Boolean excute, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Compliance Control\\Progress\\ProgressValidation.cs:line 76" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 30s]", + "title": "UI_MyTask_CC_Filters_FilterLayout(True,\"chrome\",\"/#/tasks/access-certification/overview\") failed", + 
"raw_details": "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such element: Unable to locate element: {\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}\n (Session info: chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 (62ebf098771772160f391d75e589dc567915b233),platform=Windows NT 6.3.9600 x86_64) (WARNING: The server did not provide any stacktrace information)\nCommand duration or timeout: 16 milliseconds\nFor documentation on this error, please visit: http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 -0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: '6.3', java.version: '1.8.0_66'\nDriver info: org.openqa.selenium.chrome.ChromeDriver\nCapabilities [{applicationCacheEnabled=false, rotatable=false, mobileEmulationEnabled=false, networkConnectionEnabled=false, chrome={chromedriverVersion=2.29.461591 (62ebf098771772160f391d75e589dc567915b233), userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir10360_6306}, takesHeapSnapshot=true, pageLoadStrategy=normal, databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, version=58.0.3029.110, platform=WIN8_1, browserConnectionEnabled=false, nativeEvents=true, acceptSslCerts=true, locationContextEnabled=true, webStorageEnabled=true, browserName=chrome, takesScreenshot=true, javascriptEnabled=true, cssSelectorsEnabled=true, unexpectedAlertBehaviour=}]\nSession ID: 68b0320c39a561808d45f7b1bd2ce18e\n*** Element info: {Using=xpath, value=//span[@translate='_Loading_']}\n at OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(String exceptionMessage, Exception lastException)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in 
C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks_Compliance_Control.Filters.FiltersValidation.UI_MyTask_CC_Filters_FilterLayout(Boolean excute, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Compliance Control\\Filters\\FiltersValidation.cs:line 30\n--NoSuchElementException\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_CC_Filters_FiltersFunctionality(True,\"chrome\",\"/#/tasks/access-request/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at 
MyCompanyUiSettings.Tl.My_Tasks_Compliance_Control.Filters.FiltersValidation.UI_MyTask_CC_Filters_FiltersFunctionality(Boolean excute, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\Compliance Control\\Filters\\FiltersValidation.cs:line 69" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 30s]", + "title": "UI_MyTask_MR_Filters_FilterLayout(True,\"chrome\",\"/#/tasks/access-certification/overview\") failed", + "raw_details": "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such element: Unable to locate element: {\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}\n (Session info: chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 (62ebf098771772160f391d75e589dc567915b233),platform=Windows NT 6.3.9600 x86_64) (WARNING: The server did not provide any stacktrace information)\nCommand duration or timeout: 0 milliseconds\nFor documentation on this error, please visit: http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 -0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: '6.3', java.version: '1.8.0_66'\nDriver info: org.openqa.selenium.chrome.ChromeDriver\nCapabilities [{applicationCacheEnabled=false, rotatable=false, mobileEmulationEnabled=false, networkConnectionEnabled=false, chrome={chromedriverVersion=2.29.461591 (62ebf098771772160f391d75e589dc567915b233), userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir2736_22908}, takesHeapSnapshot=true, pageLoadStrategy=normal, databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, version=58.0.3029.110, platform=WIN8_1, browserConnectionEnabled=false, nativeEvents=true, 
acceptSslCerts=true, locationContextEnabled=true, webStorageEnabled=true, browserName=chrome, takesScreenshot=true, javascriptEnabled=true, cssSelectorsEnabled=true, unexpectedAlertBehaviour=}]\nSession ID: 52ab857fbeb80383ec0a4311504f7b8e\n*** Element info: {Using=xpath, value=//span[@translate='_Loading_']}\n at OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(String exceptionMessage, Exception lastException)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks_My_Requests.Filters.FiltersValidation.UI_MyTask_MR_Filters_FilterLayout(Boolean excute, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\My Requests\\Filters\\FiltersValidation.cs:line 22\n--NoSuchElementException\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UI_MyTask_MR_Filters_FiltersFunctionality(True,\"chrome\",\"/#/tasks/access-request/overview\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at 
OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.My_Tasks_My_Requests.Filters.FiltersValidation.UI_MyTask_MR_Filters_FiltersFunctionality(Boolean excute, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My Tasks\\My Requests\\Filters\\FiltersValidation.cs:line 78" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 30s]", + "title": "UIAlertExclusionAddAccountsFromSearch_TC7466(True,\"en\",\"1\",\"chrome\",\"/#/settings/general-settings\") failed", + "raw_details": "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such element: Unable to locate element: {\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}\n (Session info: chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 (62ebf098771772160f391d75e589dc567915b233),platform=Windows NT 6.3.9600 x86_64) (WARNING: The server did not provide any stacktrace information)\nCommand duration or timeout: 0 milliseconds\nFor documentation on this error, please visit: http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 -0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: '6.3', java.version: '1.8.0_66'\nDriver info: 
org.openqa.selenium.chrome.ChromeDriver\nCapabilities [{applicationCacheEnabled=false, rotatable=false, mobileEmulationEnabled=false, networkConnectionEnabled=false, chrome={chromedriverVersion=2.29.461591 (62ebf098771772160f391d75e589dc567915b233), userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir3016_20227}, takesHeapSnapshot=true, pageLoadStrategy=normal, databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, version=58.0.3029.110, platform=WIN8_1, browserConnectionEnabled=false, nativeEvents=true, acceptSslCerts=true, locationContextEnabled=true, webStorageEnabled=true, browserName=chrome, takesScreenshot=true, javascriptEnabled=true, cssSelectorsEnabled=true, unexpectedAlertBehaviour=}]\nSession ID: c9411ed622920bbdad53147bc36fd09b\n*** Element info: {Using=xpath, value=//span[@translate='_Loading_']}\n at OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(String exceptionMessage, Exception lastException)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Alert_Exclusions.AlertExclusions.UIAlertExclusionAddAccountsFromSearch_TC7466(Boolean excute, String language, String itteration, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Alert Exclusions\\AlertExclusions.cs:line 76\n--NoSuchElementException\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)" + }, 
+ { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UIAlertExclusionBulkActionsCoverage_TC7465(True,\"en\",\"1\",\"chrome\",\"/#/settings/general-settings\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Alert_Exclusions.AlertExclusions.UIAlertExclusionBulkActionsCoverage_TC7465(Boolean excute, String language, String itteration, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Alert Exclusions\\AlertExclusions.cs:line 111" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UIAlertExclusionBulkUploadCoverage_TC7467_TC7468(True,\"en\",\"1\",\"chrome\",\"/#/settings/general-settings\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String 
driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Alert_Exclusions.AlertExclusions.UIAlertExclusionBulkUploadCoverage_TC7467_TC7468(Boolean excute, String language, String itteration, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Alert Exclusions\\AlertExclusions.cs:line 575" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UIAlertExclusionBulkUploadDownloadSampleFile_TC7464(True,\"en\",\"1\",\"chrome\",\"/#/settings/general-settings\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Alert_Exclusions.AlertExclusions.UIAlertExclusionBulkUploadDownloadSampleFile_TC7464(Boolean excute, String language, 
String itteration, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Alert Exclusions\\AlertExclusions.cs:line 155" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UIAlertExclusionColumns_TC7474(True,\"en\",\"1\",\"chrome\",\"/#/settings/general-settings\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Alert_Exclusions.AlertExclusions.UIAlertExclusionColumns_TC7474(Boolean excute, String language, String itteration, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Alert Exclusions\\AlertExclusions.cs:line 204" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UIAlertExclusionGridCoverage_TC7465(True,\"en\",\"1\",\"chrome\",\"/#/settings/general-settings\",\"u0g793,u1g1,u1g792,u1g802,u2g399,u2g8...\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated 
sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Alert_Exclusions.AlertExclusions.UIAlertExclusionGridCoverage_TC7465(Boolean excute, String language, String itteration, String browserName, String url, String names) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Alert Exclusions\\AlertExclusions.cs:line 532" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UIAlertExclusionLoadSameAccountTwice_TC7473(True,\"en\",\"1\",\"chrome\",\"/#/settings/general-settings\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in 
C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Alert_Exclusions.AlertExclusions.UIAlertExclusionLoadSameAccountTwice_TC7473(Boolean excute, String language, String itteration, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Alert Exclusions\\AlertExclusions.cs:line 301" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UIAlertExclusionNonCsvFormat_TC7472(True,\"en\",\"1\",\"chrome\",\"/#/settings/general-settings\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Alert_Exclusions.AlertExclusions.UIAlertExclusionNonCsvFormat_TC7472(Boolean excute, String language, String itteration, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Alert Exclusions\\AlertExclusions.cs:line 349" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": 
"UIAlertExclusionPaginationCoverage_TC7471(True,\"en\",\"1\",\"chrome\",\"/#/settings/general-settings\",\"u0g791,u0g801,u1g791,u1g801,u2g791,u2...\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Alert_Exclusions.AlertExclusions.UIAlertExclusionPaginationCoverage_TC7471(Boolean excute, String language, String itteration, String browserName, String url, String names) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Alert Exclusions\\AlertExclusions.cs:line 32" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UIAlertExclusionRemoveAccounts_TC7470(True,\"en\",\"1\",\"chrome\",\"/#/settings/general-settings\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String 
value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Alert_Exclusions.AlertExclusions.UIAlertExclusionRemoveAccounts_TC7470(Boolean excute, String language, String itteration, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Alert Exclusions\\AlertExclusions.cs:line 397" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UIAlertExclusionScreenOverviewLook_TC7465(True,\"en\",\"1\",\"chrome\",\"/#/settings/general-settings\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Alert_Exclusions.AlertExclusions.UIAlertExclusionScreenOverviewLook_TC7465(Boolean excute, String language, String itteration, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Alert 
Exclusions\\AlertExclusions.cs:line 248" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UIAlertExclusionSearchCurrentExcludedAccounts_TC7475(True,\"en\",\"1\",\"chrome\",\"/#/settings/general-settings\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Alert_Exclusions.AlertExclusions.UIAlertExclusionSearchCurrentExcludedAccounts_TC7475(Boolean excute, String language, String itteration, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Alert Exclusions\\AlertExclusions.cs:line 488" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UIAlertExclusionShowPerPageCoverage_TC7465(True,\"en\",\"1\",\"chrome\",\"/#/settings/general-settings\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at 
OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Alert_Exclusions.AlertExclusions.UIAlertExclusionShowPerPageCoverage_TC7465(Boolean excute, String language, String itteration, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Alert Exclusions\\AlertExclusions.cs:line 447" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 30s]", + "title": "UIDataOwnerExclusionAddAccountsFromSearch_TC3411(True,\"en\",\"1\",\"chrome\",\"/#/settings/general-settings\") failed", + "raw_details": "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such element: Unable to locate element: {\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}\n (Session info: chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 (62ebf098771772160f391d75e589dc567915b233),platform=Windows NT 6.3.9600 x86_64) (WARNING: The server did not provide any stacktrace information)\nCommand duration or timeout: 0 milliseconds\nFor documentation on this error, please visit: http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 -0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: '6.3', java.version: '1.8.0_66'\nDriver info: 
org.openqa.selenium.chrome.ChromeDriver\nCapabilities [{applicationCacheEnabled=false, rotatable=false, mobileEmulationEnabled=false, networkConnectionEnabled=false, chrome={chromedriverVersion=2.29.461591 (62ebf098771772160f391d75e589dc567915b233), userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir9916_12885}, takesHeapSnapshot=true, pageLoadStrategy=normal, databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, version=58.0.3029.110, platform=WIN8_1, browserConnectionEnabled=false, nativeEvents=true, acceptSslCerts=true, locationContextEnabled=true, webStorageEnabled=true, browserName=chrome, takesScreenshot=true, javascriptEnabled=true, cssSelectorsEnabled=true, unexpectedAlertBehaviour=}]\nSession ID: d3eacb9d6fac9a67fa47aa82158da43c\n*** Element info: {Using=xpath, value=//span[@translate='_Loading_']}\n at OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(String exceptionMessage, Exception lastException)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Data_Owner_Exclusions.DataOwnerExclusions.UIDataOwnerExclusionAddAccountsFromSearch_TC3411(Boolean excute, String language, String itteration, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Data Owner Exclusions\\DataOwnerExclusions.cs:line 142\n--NoSuchElementException\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at 
OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UIDataOwnerExclusionBulkActionsCoverage_TC7554_TC3415(True,\"en\",\"1\",\"chrome\",\"/#/settings/general-settings\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Data_Owner_Exclusions.DataOwnerExclusions.UIDataOwnerExclusionBulkActionsCoverage_TC7554_TC3415(Boolean excute, String language, String itteration, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Data Owner Exclusions\\DataOwnerExclusions.cs:line 180" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UIDataOwnerExclusionBulkUploadCoverage_TC3412_TC3413(True,\"en\",\"1\",\"chrome\",\"/#/settings/general-settings\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at 
OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Data_Owner_Exclusions.DataOwnerExclusions.UIDataOwnerExclusionBulkUploadCoverage_TC3412_TC3413(Boolean excute, String language, String itteration, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Data Owner Exclusions\\DataOwnerExclusions.cs:line 78" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UIDataOwnerExclusionColumns_TC3419(True,\"en\",\"1\",\"chrome\",\"/#/settings/general-settings\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at 
MyCompanyUiSettings.Tl.Settings.Data_Owner_Exclusions.DataOwnerExclusions.UIDataOwnerExclusionColumns_TC3419(Boolean excute, String language, String itteration, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Data Owner Exclusions\\DataOwnerExclusions.cs:line 223" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UIDataOwnerExclusionGridCoverage_TC7554(True,\"en\",\"1\",\"chrome\",\"/#/settings/general-settings\",\"u0g793,u1g1,u1g792,u1g802,u2g399,u2g8...\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Data_Owner_Exclusions.DataOwnerExclusions.UIDataOwnerExclusionGridCoverage_TC7554(Boolean excute, String language, String itteration, String browserName, String url, String names) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Data Owner Exclusions\\DataOwnerExclusions.cs:line 267" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": 
"UIDataOwnerExclusionLoadSameAccountTwice_TC3418(True,\"en\",\"1\",\"chrome\",\"/#/settings/general-settings\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Data_Owner_Exclusions.DataOwnerExclusions.UIDataOwnerExclusionLoadSameAccountTwice_TC3418(Boolean excute, String language, String itteration, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Data Owner Exclusions\\DataOwnerExclusions.cs:line 309" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UIDataOwnerExclusionNonCsvFormat_TC3417(True,\"en\",\"1\",\"chrome\",\"/#/settings/general-settings\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at 
OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Data_Owner_Exclusions.DataOwnerExclusions.UIDataOwnerExclusionNonCsvFormat_TC3417(Boolean excute, String language, String itteration, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Data Owner Exclusions\\DataOwnerExclusions.cs:line 31" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UIDataOwnerExclusionPaginationCoverage_TC7554_TC3415(True,\"en\",\"1\",\"chrome\",\"/#/settings/general-settings\",\"u0g106,u0g115,u0g124,u0g133,u0g142,u0...\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Data_Owner_Exclusions.DataOwnerExclusions.UIDataOwnerExclusionPaginationCoverage_TC7554_TC3415(Boolean excute, String language, String itteration, String browserName, String url, String names) in 
C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Data Owner Exclusions\\DataOwnerExclusions.cs:line 355" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UIDataOwnerExclusionSearchCurrentExcludedAccounts_TC3420(True,\"en\",\"1\",\"chrome\",\"/#/settings/general-settings\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Data_Owner_Exclusions.DataOwnerExclusions.UIDataOwnerExclusionSearchCurrentExcludedAccounts_TC3420(Boolean excute, String language, String itteration, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Data Owner Exclusions\\DataOwnerExclusions.cs:line 398" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UIDataOwnerExclusionShowPerPageCoverage_TC7554(True,\"en\",\"1\",\"chrome\",\"/#/settings/general-settings\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated 
sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Data_Owner_Exclusions.DataOwnerExclusions.UIDataOwnerExclusionShowPerPageCoverage_TC7554(Boolean excute, String language, String itteration, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Data Owner Exclusions\\DataOwnerExclusions.cs:line 438" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 30s]", + "title": "UIGeneralSettingsAllowPhysicalPath_TC10766(True,\"1\",\"abcd\",\"chrome\",\"en\",\"user,ra_user\",\"crowdSource,whiteops\",\"user\",\"is_administrator\",\"/#/settings/general-settings\") failed", + "raw_details": "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such element: Unable to locate element: {\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}\n (Session info: chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 (62ebf098771772160f391d75e589dc567915b233),platform=Windows NT 6.3.9600 x86_64) (WARNING: The server did not provide any stacktrace information)\nCommand duration or timeout: 0 milliseconds\nFor documentation on this error, please visit: http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: version: '3.1.0', 
revision: '86a5d70', time: '2017-02-16 07:57:44 -0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: '6.3', java.version: '1.8.0_66'\nDriver info: org.openqa.selenium.chrome.ChromeDriver\nCapabilities [{applicationCacheEnabled=false, rotatable=false, mobileEmulationEnabled=false, networkConnectionEnabled=false, chrome={chromedriverVersion=2.29.461591 (62ebf098771772160f391d75e589dc567915b233), userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir7348_16522}, takesHeapSnapshot=true, pageLoadStrategy=normal, databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, version=58.0.3029.110, platform=WIN8_1, browserConnectionEnabled=false, nativeEvents=true, acceptSslCerts=true, locationContextEnabled=true, webStorageEnabled=true, browserName=chrome, takesScreenshot=true, javascriptEnabled=true, cssSelectorsEnabled=true, unexpectedAlertBehaviour=}]\nSession ID: a9460966896b2f67901d0c200c612026\n*** Element info: {Using=xpath, value=//span[@translate='_Loading_']}\n at OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(String exceptionMessage, Exception lastException)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Logical_Mapped_Path.Settings.Settings.UIGeneralSettingsAllowPhysicalPath_TC10766(Boolean excute, String itteration, String account, String browserName, String language, String dbTables, String dbSchema, String tableName, String columnName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Logical Mapped Path\\Settings\\Settings.cs:line 266\n--NoSuchElementException\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at 
OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UIGeneralSettingsDataDisplay_TC10898(True,\"1\",\"abcd\",\"chrome\",\"en\",\"user,ra_user\",\"crowdSource,whiteops\",\"user\",\"is_administrator\",\"/#/settings/general-settings\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Logical_Mapped_Path.Settings.Settings.UIGeneralSettingsDataDisplay_TC10898(Boolean excute, String itteration, String account, String browserName, String language, String dbTables, String dbSchema, String tableName, String columnName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Logical Mapped Path\\Settings\\Settings.cs:line 75" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": 
"warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UIGeneralSettingsExcludeAdministrator_TC10765(True,\"1\",\"abcd\",\"chrome\",\"en\",\"user,ra_user\",\"crowdSource,whiteops\",\"user\",\"is_administrator\",\"/#/settings/general-settings\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Logical_Mapped_Path.Settings.Settings.UIGeneralSettingsExcludeAdministrator_TC10765(Boolean excute, String itteration, String account, String browserName, String language, String dbTables, String dbSchema, String tableName, String columnName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Logical Mapped Path\\Settings\\Settings.cs:line 192" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UIGeneralSettingsNavigation_TC10897(True,\"1\",\"abcd\",\"chrome\",\"en\",\"user,ra_user\",\"crowdSource,whiteops\",\"user\",\"is_administrator\",\"/#/settings/general-settings\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive 
sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Logical_Mapped_Path.Settings.Settings.UIGeneralSettingsNavigation_TC10897(Boolean excute, String itteration, String account, String browserName, String language, String dbTables, String dbSchema, String tableName, String columnName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Logical Mapped Path\\Settings\\Settings.cs:line 36" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UIGeneralSettingsTranslatePhysicalPath_TC10764(True,\"1\",\"abcd\",\"chrome\",\"en\",\"user,ra_user\",\"crowdSource,whiteops\",\"user\",\"is_administrator\",\"/#/settings/general-settings\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at 
OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Logical_Mapped_Path.Settings.Settings.UIGeneralSettingsTranslatePhysicalPath_TC10764(Boolean excute, String itteration, String account, String browserName, String language, String dbTables, String dbSchema, String tableName, String columnName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Logical Mapped Path\\Settings\\Settings.cs:line 119" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 31s]", + "title": "UIMessageTemplatesCompanyInformationCoverage_TC3422_TC7534(True,\"en\",\"1\",\"chrome\",\"/#/settings/crowd-messages/welcome-me...\") failed", + "raw_details": "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such element: Unable to locate element: {\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}\n (Session info: chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 (62ebf098771772160f391d75e589dc567915b233),platform=Windows NT 6.3.9600 x86_64) (WARNING: The server did not provide any stacktrace information)\nCommand duration or timeout: 16 milliseconds\nFor documentation on this error, please visit: http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 -0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: '6.3', java.version: '1.8.0_66'\nDriver info: org.openqa.selenium.chrome.ChromeDriver\nCapabilities [{applicationCacheEnabled=false, rotatable=false, mobileEmulationEnabled=false, networkConnectionEnabled=false, 
chrome={chromedriverVersion=2.29.461591 (62ebf098771772160f391d75e589dc567915b233), userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir2232_22398}, takesHeapSnapshot=true, pageLoadStrategy=normal, databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, version=58.0.3029.110, platform=WIN8_1, browserConnectionEnabled=false, nativeEvents=true, acceptSslCerts=true, locationContextEnabled=true, webStorageEnabled=true, browserName=chrome, takesScreenshot=true, javascriptEnabled=true, cssSelectorsEnabled=true, unexpectedAlertBehaviour=}]\nSession ID: 882c55bf9c675e183d7269fae3076ce9\n*** Element info: {Using=xpath, value=//span[@translate='_Loading_']}\n at OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(String exceptionMessage, Exception lastException)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Messages.MessagesTests.UIMessageTemplatesCompanyInformationCoverage_TC3422_TC7534(Boolean excute, String language, String itteration, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Messages\\MessagesTests.cs:line 33\n--NoSuchElementException\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": 
"UIMessageTemplatesCompanyInformationFunctionality_TC3422_TC7534(True,\"en\",\"1\",\"chrome\",\"/#/settings/crowd-messages/welcome-me...\",\"google\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Messages.MessagesTests.UIMessageTemplatesCompanyInformationFunctionality_TC3422_TC7534(Boolean excute, String language, String itteration, String browserName, String url, String companyName) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Messages\\MessagesTests.cs:line 79" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UIMessageTemplatesCumulativeReminderScheduling_TC3426(True,\"en\",\"1\",\"chrome\",\"/#/settings/crowd-messages/welcome-me...\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at 
OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Messages.MessagesTests.UIMessageTemplatesCumulativeReminderScheduling_TC3426(Boolean excute, String language, String itteration, String browserName, String url) in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Messages\\MessagesTests.cs:line 116" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UIMessageTemplatesDiscardChanges_TC3425(True,\"en\",\"1\",\"chrome\",\"/#/settings/crowd-messages/welcome-me...\") failed", + "raw_details": "System.InvalidOperationException : Session [(null externalkey)] not available and is not among the last 1000 terminated sessions.\nActive sessions are[]\n at OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Response errorResponse)\n at OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String driverCommandToExecute, Dictionary`2 parameters)\n at OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String mechanism, String value)\n at OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.b__12(IWebDriver driver)\n at OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base.cs:line 537\n at MyCompanyUiSettings.Tl.Settings.Messages.MessagesTests.UIMessageTemplatesDiscardChanges_TC3425(Boolean excute, String language, String itteration, String browserName, String url) in 
C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Settings\\Messages\\MessagesTests.cs:line 172" + }, + { + "path": "/", + "start_line": 0, + "end_line": 0, + "annotation_level": "warning", + "message": "nunit3/jenkins/NUnit-issue44527.xml [took 0s]", + "title": "UIMessageTemplatesHtmlEditor_TC3424(True,\"en\",\"1\",\"chrome\",\"/#/settings/crowd-messages/welcome-me...\",\"