diff --git a/.editorconfig b/.editorconfig deleted file mode 100644 index 6d9b74cc0..000000000 --- a/.editorconfig +++ /dev/null @@ -1,37 +0,0 @@ -root = true - -[*] -charset = utf-8 -end_of_line = lf -insert_final_newline = true -trim_trailing_whitespace = true -indent_size = 4 -indent_style = space - -[*.{md,yml,yaml,html,css,scss,js}] -indent_size = 2 - -# These files are edited and tested upstream in nf-core/modules -[/modules/nf-core/**] -charset = unset -end_of_line = unset -insert_final_newline = unset -trim_trailing_whitespace = unset -indent_style = unset -[/subworkflows/nf-core/**] -charset = unset -end_of_line = unset -insert_final_newline = unset -trim_trailing_whitespace = unset -indent_style = unset - -[/assets/email*] -indent_size = unset - -# ignore python and markdown -[*.{py,md}] -indent_style = unset - -# ignore ro-crate metadata files -[**/ro-crate-metadata.json] -insert_final_newline = unset diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 46fbf85c2..7abddde1d 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -78,7 +78,7 @@ If you wish to contribute a new step, please use the following coding standards: 5. Add any new parameters to `nextflow_schema.json` with help text (via the `nf-core pipelines schema build` tool). 6. Add sanity checks and validation for all relevant parameters. 7. Perform local tests to validate that the new code works as expected. -8. If applicable, add a new test command in `.github/workflow/ci.yml`. +8. If applicable, add a new test in the `tests` directory. 9. Update MultiQC config `assets/multiqc_config.yml` so relevant suffixes, file name clean up and module plots are in the appropriate order. If applicable, add a [MultiQC](https://https://multiqc.info/) module. 10. Add a description of the output files and if relevant any appropriate images from the MultiQC report to `docs/output.md`. diff --git a/.github/actions/get-shards/action.yml b/.github/actions/get-shards/action.yml new file mode 100644 index 000000000..34085279f --- /dev/null +++ b/.github/actions/get-shards/action.yml @@ -0,0 +1,69 @@ +name: "Get number of shards" +description: "Get the number of nf-test shards for the current CI job" +inputs: + max_shards: + description: "Maximum number of shards allowed" + required: true + paths: + description: "Component paths to test" + required: false + tags: + description: "Tags to pass as argument for nf-test --tag parameter" + required: false +outputs: + shard: + description: "Array of shard numbers" + value: ${{ steps.shards.outputs.shard }} + total_shards: + description: "Total number of shards" + value: ${{ steps.shards.outputs.total_shards }} +runs: + using: "composite" + steps: + - name: Install nf-test + uses: nf-core/setup-nf-test@v1 + with: + version: ${{ env.NFT_VER }} + - name: Get number of shards + id: shards + shell: bash + run: | + # Run nf-test with dynamic parameter + nftest_output=$(nf-test test \ + --profile +docker \ + $(if [ -n "${{ inputs.tags }}" ]; then echo "--tag ${{ inputs.tags }}"; fi) \ + --dry-run \ + --ci \ + --changed-since HEAD^) || { + echo "nf-test command failed with exit code $?" + echo "Full output: $nftest_output" + exit 1 + } + echo "nf-test dry-run output: $nftest_output" + + # Default values for shard and total_shards + shard="[]" + total_shards=0 + + # Check if there are related tests + if echo "$nftest_output" | grep -q 'No tests to execute'; then + echo "No related tests found." 
+ else + # Extract the number of related tests + number_of_shards=$(echo "$nftest_output" | sed -n 's|.*Executed \([0-9]*\) tests.*|\1|p') + if [[ -n "$number_of_shards" && "$number_of_shards" -gt 0 ]]; then + shards_to_run=$(( $number_of_shards < ${{ inputs.max_shards }} ? $number_of_shards : ${{ inputs.max_shards }} )) + shard=$(seq 1 "$shards_to_run" | jq -R . | jq -c -s .) + total_shards="$shards_to_run" + else + echo "Unexpected output format. Falling back to default values." + fi + fi + + # Write to GitHub Actions outputs + echo "shard=$shard" >> $GITHUB_OUTPUT + echo "total_shards=$total_shards" >> $GITHUB_OUTPUT + + # Debugging output + echo "Final shard array: $shard" + echo "Total number of shards: $total_shards" diff --git a/.github/actions/nf-test/action.yml b/.github/actions/nf-test/action.yml new file mode 100644 index 000000000..243e78238 --- /dev/null +++ b/.github/actions/nf-test/action.yml @@ -0,0 +1,113 @@ +name: "nf-test Action" +description: "Runs nf-test with common setup steps" +inputs: + profile: + description: "Profile to use" + required: true + shard: + description: "Shard number for this CI job" + required: true + total_shards: + description: "Total number of test shards(NOT the total number of matrix jobs)" + required: true + paths: + description: "Test paths" + required: true + tags: + description: "Tags to pass as argument for nf-test --tag parameter" + required: false +runs: + using: "composite" + steps: + - name: Setup Nextflow + uses: nf-core/setup-nextflow@v2 + with: + version: "${{ env.NXF_VERSION }}" + + - name: Set up Python + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 + with: + python-version: "3.13" + + - name: Install nf-test + uses: nf-core/setup-nf-test@v1 + with: + version: "${{ env.NFT_VER }}" + install-pdiff: true + + - name: Setup apptainer + if: contains(inputs.profile, 'singularity') + uses: eWaterCycle/setup-apptainer@main + + - name: Set up Singularity + if: contains(inputs.profile, 'singularity') + shell: bash + run: | + mkdir -p $NXF_SINGULARITY_CACHEDIR + mkdir -p $NXF_SINGULARITY_LIBRARYDIR + + - name: Conda setup + if: contains(inputs.profile, 'conda') + uses: conda-incubator/setup-miniconda@505e6394dae86d6a5c7fbb6e3fb8938e3e863830 # v3 + with: + auto-update-conda: true + conda-solver: libmamba + conda-remove-defaults: true + + # TODO Skip failing conda tests and document their failures + # https://github.com/nf-core/modules/issues/7017 + - name: Run nf-test + shell: bash + env: + NFT_DIFF: ${{ env.NFT_DIFF }} + NFT_DIFF_ARGS: ${{ env.NFT_DIFF_ARGS }} + NFT_WORKDIR: ${{ env.NFT_WORKDIR }} + run: | + nf-test test \ + --profile=+${{ inputs.profile }} \ + $(if [ -n "${{ inputs.tags }}" ]; then echo "--tag ${{ inputs.tags }}"; fi) \ + --ci \ + --changed-since HEAD^ \ + --verbose \ + --tap=test.tap \ + --shard ${{ inputs.shard }}/${{ inputs.total_shards }} + + # Save the absolute path of the test.tap file to the output + echo "tap_file_path=$(realpath test.tap)" >> $GITHUB_OUTPUT + + - name: Generate test summary + if: always() + shell: bash + run: | + # Add header if it doesn't exist (using a token file to track this) + if [ ! 
-f ".summary_header" ]; then + echo "# 🚀 nf-test results" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "| Status | Test Name | Profile | Shard |" >> $GITHUB_STEP_SUMMARY + echo "|:------:|-----------|---------|-------|" >> $GITHUB_STEP_SUMMARY + touch .summary_header + fi + + if [ -f test.tap ]; then + while IFS= read -r line; do + if [[ $line =~ ^ok ]]; then + test_name="${line#ok }" + # Remove the test number from the beginning + test_name="${test_name#* }" + echo "| ✅ | ${test_name} | ${{ inputs.profile }} | ${{ inputs.shard }}/${{ inputs.total_shards }} |" >> $GITHUB_STEP_SUMMARY + elif [[ $line =~ ^not\ ok ]]; then + test_name="${line#not ok }" + # Remove the test number from the beginning + test_name="${test_name#* }" + echo "| ❌ | ${test_name} | ${{ inputs.profile }} | ${{ inputs.shard }}/${{ inputs.total_shards }} |" >> $GITHUB_STEP_SUMMARY + fi + done < test.tap + else + echo "| ⚠️ | No test results found | ${{ inputs.profile }} | ${{ inputs.shard }}/${{ inputs.total_shards }} |" >> $GITHUB_STEP_SUMMARY + fi + + - name: Clean up + if: always() + shell: bash + run: | + sudo rm -rf /home/ubuntu/tests/ diff --git a/.github/workflows/awsfulltest.yml b/.github/workflows/awsfulltest.yml new file mode 100644 index 000000000..7721d0009 --- /dev/null +++ b/.github/workflows/awsfulltest.yml @@ -0,0 +1,52 @@ +name: nf-core AWS full size tests +# This workflow is triggered on PRs opened against the main/master branch. +# It can be additionally triggered manually with GitHub actions workflow dispatch button. +# It runs the -profile 'test_full' on AWS batch + +on: + workflow_dispatch: + pull_request_review: + types: [submitted] + release: + types: [published] + +jobs: + run-platform: + name: Run AWS full tests + # run only if the PR is approved by at least 2 reviewers and against the master/main branch or manually triggered + if: github.repository == 'nf-core/rnaseq' && github.event.review.state == 'approved' && (github.event.pull_request.base.ref == 'master' || github.event.pull_request.base.ref == 'main') || github.event_name == 'workflow_dispatch' || github.event_name == 'release' + runs-on: ubuntu-latest + strategy: + matrix: + aligner: ["star_salmon", "star_rsem"] + steps: + - name: Set revision variable + id: revision + run: | + echo "revision=${{ (github.event_name == 'workflow_dispatch' || github.event_name == 'release') && github.sha || 'dev' }}" >> "$GITHUB_OUTPUT" + + - name: Launch workflow via Seqera Platform + uses: seqeralabs/action-tower-launch@v2 + # TODO nf-core: You can customise AWS full pipeline tests as required + # Add full size test data (but still relatively small datasets for few samples) + # on the `test_full.config` test runs with only one set of parameters + with: + workspace_id: ${{ secrets.TOWER_WORKSPACE_ID }} + access_token: ${{ secrets.TOWER_ACCESS_TOKEN }} + compute_env: ${{ secrets.TOWER_COMPUTE_ENV }} + revision: ${{ steps.revision.outputs.revision }} + workdir: s3://${{ secrets.AWS_S3_BUCKET }}/work/rnaseq/work-${{ steps.revision.outputs.revision }} + parameters: | + { + "hook_url": "${{ secrets.MEGATESTS_ALERTS_SLACK_HOOK_URL }}", + "aligner": "${{ matrix.aligner }}", + "outdir": "s3://${{ secrets.AWS_S3_BUCKET }}/rnaseq/results-${{ steps.revision.outputs.revision }}" + } + profiles: test_full + + - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 + with: + name: Seqera Platform debug log file + path: | + seqera_platform_action_*.log + seqera_platform_action_*.json diff --git a/.github/workflows/awstest.yml 
b/.github/workflows/awstest.yml new file mode 100644 index 000000000..f5c170fd7 --- /dev/null +++ b/.github/workflows/awstest.yml @@ -0,0 +1,33 @@ +name: nf-core AWS test +# This workflow can be triggered manually with the GitHub actions workflow dispatch button. +# It runs the -profile 'test' on AWS batch + +on: + workflow_dispatch: +jobs: + run-platform: + name: Run AWS tests + if: github.repository == 'nf-core/rnaseq' + runs-on: ubuntu-latest + steps: + # Launch workflow using Seqera Platform CLI tool action + - name: Launch workflow via Seqera Platform + uses: seqeralabs/action-tower-launch@v2 + with: + workspace_id: ${{ secrets.TOWER_WORKSPACE_ID }} + access_token: ${{ secrets.TOWER_ACCESS_TOKEN }} + compute_env: ${{ secrets.TOWER_COMPUTE_ENV }} + revision: ${{ github.sha }} + workdir: s3://${{ secrets.AWS_S3_BUCKET }}/work/rnaseq/work-${{ github.sha }} + parameters: | + { + "outdir": "s3://${{ secrets.AWS_S3_BUCKET }}/rnaseq/results-test-${{ github.sha }}" + } + profiles: test + + - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 + with: + name: Seqera Platform debug log file + path: | + seqera_platform_action_*.log + seqera_platform_action_*.json diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml deleted file mode 100644 index 4eac24b97..000000000 --- a/.github/workflows/ci.yml +++ /dev/null @@ -1,148 +0,0 @@ -name: nf-core CI -# This workflow runs the pipeline with the minimal test dataset to check that it completes without any syntax errors -on: - pull_request: - release: - types: [published] - workflow_dispatch: - -env: - NFT_DIFF: "pdiff" - NFT_DIFF_ARGS: "--line-numbers --width 120 --expand-tabs=2" - NFT_VER: "0.9.0" - NFT_WORKDIR: "~" - NXF_ANSI_LOG: false - NXF_SINGULARITY_CACHEDIR: ${{ github.workspace }}/.singularity - NXF_SINGULARITY_LIBRARYDIR: ${{ github.workspace }}/.singularity - -concurrency: - group: "${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}" - cancel-in-progress: true -jobs: - nf-test-changes: - name: Check for changes - runs-on: ubuntu-latest - outputs: - nf_test_files: ${{ steps.list.outputs.components }} - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: List nf-test files - id: list - uses: adamrtalbot/detect-nf-test-changes@v0.0.2 - with: - head: ${{ github.sha }} - base: origin/${{ github.base_ref }} - include: .github/include.yaml - - - name: print list of nf-test files - run: | - echo ${{ steps.list.outputs.components }} - - test: - name: "Run tests (${{ matrix.nf_test_files }} ${{ matrix.profile }} NF-${{ matrix.NXF_VER }})" - needs: [nf-test-changes] - if: needs.nf-test-changes.outputs.nf_test_files != '[]' - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - NXF_VER: - - "24.04.2" - - "latest-everything" - nf_test_files: ["${{ fromJson(needs.nf-test-changes.outputs.nf_test_files) }}"] - profile: - - "docker" - - "singularity" - isMaster: - - ${{ github.base_ref == 'master' }} - # Exclude conda and singularity on dev - exclude: - - isMaster: false - profile: "singularity" - - NXF_VER: "latest-everything" - profile: "singularity" - steps: - - name: Check out pipeline code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - with: - fetch-depth: 0 - - - uses: actions/setup-java@8df1039502a15bceb9433410b1a100fbe190c53b # v4 - with: - distribution: "temurin" - java-version: "17" - - - name: Set up Nextflow - uses: nf-core/setup-nextflow@v2 - with: - version: "${{ matrix.NXF_VER }}" - - - name: Set up Apptainer - if: 
matrix.profile == 'singularity' - uses: eWaterCycle/setup-apptainer@main - - - name: Set up Singularity - if: matrix.profile == 'singularity' - run: | - mkdir -p $NXF_SINGULARITY_CACHEDIR - mkdir -p $NXF_SINGULARITY_LIBRARYDIR - - - name: Install nf-test - uses: nf-core/setup-nf-test@v1 - with: - version: ${{ env.NFT_VER }} - - - uses: actions/setup-python@v4 - with: - python-version: "3.11" - architecture: "x64" - - - name: Install pdiff to see diff between nf-test snapshots - run: | - python -m pip install --upgrade pip - pip install pdiff - - - name: Clean up Disk space - uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1 - - - name: Run nf-test - run: | - nf-test test --verbose ${{ matrix.nf_test_files }} --profile "+${{ matrix.profile }}" --junitxml=test.xml --tap=test.tap - - - uses: pcolby/tap-summary@v1 - with: - path: >- - test.tap - - - name: Output log on failure - if: failure() - run: | - sudo apt install bat > /dev/null - batcat --decorations=always --color=always ${{ github.workspace }}/.nf-test/tests/*/meta/nextflow.log - - - name: Publish Test Report - uses: mikepenz/action-junit-report@v3 - if: always() # always run even if the previous step fails - with: - report_paths: test.xml - - confirm-pass: - runs-on: ubuntu-latest - needs: [test] - if: always() - steps: - - name: All tests ok - if: ${{ !contains(needs.*.result, 'failure') }} - run: exit 0 - - name: One or more tests failed - if: ${{ contains(needs.*.result, 'failure') }} - run: exit 1 - - - name: debug-print - if: always() - run: | - echo "toJSON(needs) = ${{ toJSON(needs) }}" - echo "toJSON(needs.*.result) = ${{ toJSON(needs.*.result) }}" diff --git a/.github/workflows/clean-up.yml b/.github/workflows/clean-up.yml index 0b6b1f272..ac030fd58 100644 --- a/.github/workflows/clean-up.yml +++ b/.github/workflows/clean-up.yml @@ -10,7 +10,7 @@ jobs: issues: write pull-requests: write steps: - - uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9 + - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9 with: stale-issue-message: "This issue has been tagged as awaiting-changes or awaiting-feedback by an nf-core contributor. Remove stale label or add a comment otherwise this issue will be closed in 20 days." stale-pr-message: "This PR has been tagged as awaiting-changes or awaiting-feedback by an nf-core contributor. Remove stale label or add a comment if it is still useful." 
diff --git a/.github/workflows/cloud_tests_full.yml b/.github/workflows/cloud_tests_full.yml index c7ba753bb..139f165db 100644 --- a/.github/workflows/cloud_tests_full.yml +++ b/.github/workflows/cloud_tests_full.yml @@ -1,8 +1,6 @@ name: full-sized tests on cloud providers run-name: Submitting workflow to all cloud providers using full sized data on: - release: - types: [published] workflow_dispatch: inputs: platform: diff --git a/.github/workflows/download_pipeline.yml b/.github/workflows/download_pipeline.yml index ab06316ea..999bcc382 100644 --- a/.github/workflows/download_pipeline.yml +++ b/.github/workflows/download_pipeline.yml @@ -12,14 +12,6 @@ on: required: true default: "dev" pull_request: - types: - - opened - - edited - - synchronize - branches: - - main - - master - pull_request_target: branches: - main - master @@ -52,9 +44,9 @@ jobs: - name: Disk space cleanup uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1 - - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5 + - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 with: - python-version: "3.12" + python-version: "3.13" architecture: "x64" - name: Setup Apptainer @@ -120,6 +112,7 @@ jobs: echo "IMAGE_COUNT_AFTER=$image_count" >> "$GITHUB_OUTPUT" - name: Compare container image counts + id: count_comparison run: | if [ "${{ steps.count_initial.outputs.IMAGE_COUNT_INITIAL }}" -ne "${{ steps.count_afterwards.outputs.IMAGE_COUNT_AFTER }}" ]; then initial_count=${{ steps.count_initial.outputs.IMAGE_COUNT_INITIAL }} @@ -132,3 +125,10 @@ jobs: else echo "The pipeline can be downloaded successfully!" fi + + - name: Upload Nextflow logfile for debugging purposes + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 + with: + name: nextflow_logfile.txt + path: .nextflow.log* + include-hidden-files: true diff --git a/.github/workflows/fix-linting.yml b/.github/workflows/fix_linting.yml similarity index 96% rename from .github/workflows/fix-linting.yml rename to .github/workflows/fix_linting.yml index b7a354b23..62a34d5ef 100644 --- a/.github/workflows/fix-linting.yml +++ b/.github/workflows/fix_linting.yml @@ -32,9 +32,9 @@ jobs: GITHUB_TOKEN: ${{ secrets.nf_core_bot_auth_token }} # Install and run pre-commit - - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5 + - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 with: - python-version: "3.12" + python-version: "3.13" - name: Install pre-commit run: pip install pre-commit diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml index dbd52d5a2..f2d7d1dd7 100644 --- a/.github/workflows/linting.yml +++ b/.github/workflows/linting.yml @@ -3,9 +3,6 @@ name: nf-core linting # It runs the `nf-core pipelines lint` and markdown lint tests to ensure # that the code meets the nf-core guidelines. 
on: - push: - branches: - - dev pull_request: release: types: [published] @@ -17,9 +14,9 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Set up Python 3.12 - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5 + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 with: - python-version: "3.12" + python-version: "3.13" - name: Install pre-commit run: pip install pre-commit @@ -36,13 +33,13 @@ jobs: - name: Install Nextflow uses: nf-core/setup-nextflow@v2 - - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5 + - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 with: - python-version: "3.12" + python-version: "3.13" architecture: "x64" - name: read .nf-core.yml - uses: pietrobolcato/action-read-yaml@1.1.0 + uses: pietrobolcato/action-read-yaml@9f13718d61111b69f30ab4ac683e67a56d254e1d # 1.1.0 id: read_yml with: config: ${{ github.workspace }}/.nf-core.yml @@ -74,7 +71,7 @@ jobs: - name: Upload linting log file artifact if: ${{ always() }} - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: linting-logs path: | diff --git a/.github/workflows/linting_comment.yml b/.github/workflows/linting_comment.yml index 95b6b6af8..7e8050fb8 100644 --- a/.github/workflows/linting_comment.yml +++ b/.github/workflows/linting_comment.yml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Download lint results - uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8 + uses: dawidd6/action-download-artifact@4c1e823582f43b179e2cbb49c3eade4e41f992e2 # v10 with: workflow: linting.yml workflow_conclusion: completed @@ -21,7 +21,7 @@ jobs: run: echo "pr_number=$(cat linting-logs/PR_number.txt)" >> $GITHUB_OUTPUT - name: Post PR comment - uses: marocchino/sticky-pull-request-comment@331f8f5b4215f0445d3c07b4967662a32a2d3e31 # v2 + uses: marocchino/sticky-pull-request-comment@52423e01640425a022ef5fd42c6fb5f633a02728 # v2 with: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} number: ${{ steps.pr_number.outputs.pr_number }} diff --git a/.github/workflows/nf-test.yml b/.github/workflows/nf-test.yml new file mode 100644 index 000000000..8a0bb298c --- /dev/null +++ b/.github/workflows/nf-test.yml @@ -0,0 +1,144 @@ +name: Run nf-test +on: + push: + paths-ignore: + - "docs/**" + - "**/meta.yml" + - "**/*.md" + - "**/*.png" + - "**/*.svg" + pull_request: + paths-ignore: + - "docs/**" + - "**/meta.yml" + - "**/*.md" + - "**/*.png" + - "**/*.svg" + release: + types: [published] + workflow_dispatch: + +# Cancel if a newer run is started +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + NFT_DIFF: "pdiff" + NFT_DIFF_ARGS: "--line-numbers --width 120 --expand-tabs=2" + NFT_VER: "0.9.2" + NFT_WORKDIR: "~" + NXF_ANSI_LOG: false + NXF_SINGULARITY_CACHEDIR: ${{ github.workspace }}/.singularity + NXF_SINGULARITY_LIBRARYDIR: ${{ github.workspace }}/.singularity + +jobs: + nf-test-changes: + name: nf-test-changes + runs-on: # use self-hosted runners + - runs-on=$-nf-test-changes + - runner=4cpu-linux-x64 + outputs: + shard: ${{ steps.set-shards.outputs.shard }} + total_shards: ${{ steps.set-shards.outputs.total_shards }} + steps: + - name: Clean Workspace # Purge the workspace in case it's running on a self-hosted runner + run: | + ls -la 
./ + rm -rf ./* || true + rm -rf ./.??* || true + ls -la ./ + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + with: + fetch-depth: 0 + + - name: get number of shards + id: set-shards + uses: ./.github/actions/get-shards + env: + NFT_VER: ${{ env.NFT_VER }} + with: + max_shards: 7 + + - name: debug + run: | + echo ${{ steps.set-shards.outputs.shard }} + echo ${{ steps.set-shards.outputs.total_shards }} + + nf-test: + name: "${{ matrix.profile }} | ${{ matrix.NXF_VER }} | ${{ matrix.shard }}/${{ needs.nf-test-changes.outputs.total_shards }}" + needs: [nf-test-changes] + if: ${{ needs.nf-test-changes.outputs.total_shards != '0' }} + runs-on: # use self-hosted runners + - runs-on=$-nf-test + - runner=4cpu-linux-x64 + strategy: + fail-fast: false + matrix: + shard: ${{ fromJson(needs.nf-test-changes.outputs.shard) }} + profile: [conda, docker, singularity] + isMain: + - ${{ github.base_ref == 'master' || github.base_ref == 'main' }} + # Exclude conda and singularity on dev + exclude: + - isMain: false + profile: "conda" + - isMain: false + profile: "singularity" + NXF_VER: + - "24.04.2" + - "latest-everything" + env: + NXF_ANSI_LOG: false + TOTAL_SHARDS: ${{ needs.nf-test-changes.outputs.total_shards }} + + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + with: + fetch-depth: 0 + + - name: Run nf-test + uses: ./.github/actions/nf-test + env: + NFT_DIFF: ${{ env.NFT_DIFF }} + NFT_DIFF_ARGS: ${{ env.NFT_DIFF_ARGS }} + NFT_WORKDIR: ${{ env.NFT_WORKDIR }} + with: + profile: ${{ matrix.profile }} + shard: ${{ matrix.shard }} + total_shards: ${{ env.TOTAL_SHARDS }} + confirm-pass: + needs: [nf-test] + if: always() + runs-on: # use self-hosted runners + - runs-on=$-confirm-pass + - runner=2cpu-linux-x64 + steps: + - name: One or more tests failed + if: ${{ contains(needs.*.result, 'failure') }} + run: exit 1 + + - name: One or more tests cancelled + if: ${{ contains(needs.*.result, 'cancelled') }} + run: exit 1 + + - name: All tests ok + if: ${{ contains(needs.*.result, 'success') }} + run: exit 0 + + - name: debug-print + if: always() + run: | + echo "::group::DEBUG: `needs` Contents" + echo "DEBUG: toJSON(needs) = ${{ toJSON(needs) }}" + echo "DEBUG: toJSON(needs.*.result) = ${{ toJSON(needs.*.result) }}" + echo "::endgroup::" + + - name: Clean Workspace # Purge the workspace in case it's running on a self-hosted runner + if: always() + run: | + ls -la ./ + rm -rf ./* || true + rm -rf ./.??* || true + ls -la ./ diff --git a/.github/workflows/release-announcements.yml b/.github/workflows/release-announcements.yml index 76a9e67ed..4abaf4843 100644 --- a/.github/workflows/release-announcements.yml +++ b/.github/workflows/release-announcements.yml @@ -30,7 +30,7 @@ jobs: bsky-post: runs-on: ubuntu-latest steps: - - uses: zentered/bluesky-post-action@80dbe0a7697de18c15ad22f4619919ceb5ccf597 # v0.1.0 + - uses: zentered/bluesky-post-action@4aa83560bb3eac05dbad1e5f221ee339118abdd2 # v0.2.0 with: post: | Pipeline release! ${{ github.repository }} v${{ github.event.release.tag_name }} - ${{ github.event.release.name }}! 
diff --git a/.github/workflows/template_version_comment.yml b/.github/workflows/template-version-comment.yml similarity index 95% rename from .github/workflows/template_version_comment.yml rename to .github/workflows/template-version-comment.yml index 537529bc1..beb5c77fb 100644 --- a/.github/workflows/template_version_comment.yml +++ b/.github/workflows/template-version-comment.yml @@ -14,7 +14,7 @@ jobs: ref: ${{ github.event.pull_request.head.sha }} - name: Read template version from .nf-core.yml - uses: nichmor/minimal-read-yaml@v0.0.2 + uses: nichmor/minimal-read-yaml@1f7205277e25e156e1f63815781db80a6d490b8f # v0.0.2 id: read_yml with: config: ${{ github.workspace }}/.nf-core.yml diff --git a/.nf-core.yml b/.nf-core.yml index 97dadc8dd..e72bad129 100644 --- a/.nf-core.yml +++ b/.nf-core.yml @@ -1,5 +1,4 @@ lint: - actions_ci: false files_exist: - conf/modules.config files_unchanged: @@ -11,7 +10,7 @@ lint: nextflow_config: - config_defaults: - params.ribo_database_manifest -nf_core_version: 3.2.0 +nf_core_version: 3.3.1 repository_type: pipeline template: author: "Harshil Patel, Phil Ewels, Rickard Hammar\xE9n" diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1dec86502..9d0b248d3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,10 +4,24 @@ repos: hooks: - id: prettier additional_dependencies: - - prettier@3.2.5 - - - repo: https://github.com/editorconfig-checker/editorconfig-checker.python - rev: "3.1.2" + - prettier@3.5.0 + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 hooks: - - id: editorconfig-checker - alias: ec + - id: trailing-whitespace + args: [--markdown-linebreak-ext=md] + exclude: | + (?x)^( + .*ro-crate-metadata.json$| + modules/nf-core/.*| + subworkflows/nf-core/.*| + .*\.snap$ + )$ + - id: end-of-file-fixer + exclude: | + (?x)^( + .*ro-crate-metadata.json$| + modules/nf-core/.*| + subworkflows/nf-core/.*| + .*\.snap$ + )$ diff --git a/.prettierrc.yml b/.prettierrc.yml index c81f9a766..07dbd8bb9 100644 --- a/.prettierrc.yml +++ b/.prettierrc.yml @@ -1 +1,6 @@ printWidth: 120 +tabWidth: 4 +overrides: + - files: "*.{md,yml,yaml,html,css,scss,js,cff}" + options: + tabWidth: 2 diff --git a/README.md b/README.md index b4eb59468..b9d4d8de5 100644 --- a/README.md +++ b/README.md @@ -8,13 +8,14 @@ [![GitHub Actions CI Status](https://github.com/nf-core/rnaseq/actions/workflows/ci.yml/badge.svg)](https://github.com/nf-core/rnaseq/actions/workflows/ci.yml) [![GitHub Actions Linting Status](https://github.com/nf-core/rnaseq/actions/workflows/linting.yml/badge.svg)](https://github.com/nf-core/rnaseq/actions/workflows/linting.yml)[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/rnaseq/results)[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.1400710-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.1400710)[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com) -[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A524.04.2-23aa62.svg)](https://www.nextflow.io/) +[![Nextflow](https://img.shields.io/badge/version-%E2%89%A524.04.2-green?style=flat&logo=nextflow&logoColor=white&color=%230DC09D&link=https%3A%2F%2Fnextflow.io)](https://www.nextflow.io/) +[![nf-core template 
version](https://img.shields.io/badge/nf--core_template-3.3.1-green?style=flat&logo=nfcore&logoColor=white&color=%2324B064&link=https%3A%2F%2Fnf-co.re)](https://github.com/nf-core/tools/releases/tag/3.3.1) [![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/) [![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/) [![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/) [![Launch on Seqera Platform](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Seqera%20Platform-%234256e7)](https://cloud.seqera.io/launch?pipeline=https://github.com/nf-core/rnaseq) -[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23rnaseq-4A154B?labelColor=000000&logo=slack)](https://nfcore.slack.com/channels/rnaseq)[![Follow on Twitter](http://img.shields.io/badge/twitter-%40nf__core-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/nf_core)[![Follow on Mastodon](https://img.shields.io/badge/mastodon-nf__core-6364ff?labelColor=FFFFFF&logo=mastodon)](https://mstdn.science/@nf_core)[![Watch on YouTube](http://img.shields.io/badge/youtube-nf--core-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/c/nf-core) +[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23rnaseq-4A154B?labelColor=000000&logo=slack)](https://nfcore.slack.com/channels/rnaseq)[![Follow on Bluesky](https://img.shields.io/badge/bluesky-%40nf__core-1185fe?labelColor=000000&logo=bluesky)](https://bsky.app/profile/nf-co.re)[![Follow on Mastodon](https://img.shields.io/badge/mastodon-nf__core-6364ff?labelColor=FFFFFF&logo=mastodon)](https://mstdn.science/@nf_core)[![Watch on YouTube](http://img.shields.io/badge/youtube-nf--core-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/c/nf-core) ## Introduction diff --git a/conf/base.config b/conf/base.config index ad7fc74bc..acb33cedc 100644 --- a/conf/base.config +++ b/conf/base.config @@ -58,4 +58,7 @@ process { errorStrategy = 'retry' maxRetries = 2 } + withLabel: process_gpu { + ext.use_gpu = { workflow.profile.contains('gpu') } + } } diff --git a/modules/nf-core/fastqc/main.nf b/modules/nf-core/fastqc/main.nf index 033f4154a..d8989f481 100644 --- a/modules/nf-core/fastqc/main.nf +++ b/modules/nf-core/fastqc/main.nf @@ -1,5 +1,5 @@ process FASTQC { - tag "${meta.id}" + tag "$meta.id" label 'process_medium' conda "${moduleDir}/environment.yml" @@ -19,30 +19,30 @@ process FASTQC { task.ext.when == null || task.ext.when script: - def args = task.ext.args ?: '' - def prefix = task.ext.prefix ?: "${meta.id}" + def args = task.ext.args ?: '' + def prefix = task.ext.prefix ?: "${meta.id}" // Make list of old name and new name pairs to use for renaming in the bash while loop def old_new_pairs = reads instanceof Path || reads.size() == 1 ? 
[[ reads, "${prefix}.${reads.extension}" ]] : reads.withIndex().collect { entry, index -> [ entry, "${prefix}_${index + 1}.${entry.extension}" ] } - def rename_to = old_new_pairs*.join(' ').join(' ') - def renamed_files = old_new_pairs.collect{ _old_name, new_name -> new_name }.join(' ') + def rename_to = old_new_pairs*.join(' ').join(' ') + def renamed_files = old_new_pairs.collect{ old_name, new_name -> new_name }.join(' ') // The total amount of allocated RAM by FastQC is equal to the number of threads defined (--threads) time the amount of RAM defined (--memory) // https://github.com/s-andrews/FastQC/blob/1faeea0412093224d7f6a07f777fad60a5650795/fastqc#L211-L222 // Dividing the task.memory by task.cpu allows to stick to requested amount of RAM in the label - def memory_in_mb = task.memory ? task.memory.toUnit('MB').toFloat() / task.cpus : null + def memory_in_mb = MemoryUnit.of("${task.memory}").toUnit('MB') / task.cpus // FastQC memory value allowed range (100 - 10000) def fastqc_memory = memory_in_mb > 10000 ? 10000 : (memory_in_mb < 100 ? 100 : memory_in_mb) """ - printf "%s %s\\n" ${rename_to} | while read old_name new_name; do + printf "%s %s\\n" $rename_to | while read old_name new_name; do [ -f "\${new_name}" ] || ln -s \$old_name \$new_name done fastqc \\ - ${args} \\ - --threads ${task.cpus} \\ - --memory ${fastqc_memory} \\ - ${renamed_files} + $args \\ + --threads $task.cpus \\ + --memory $fastqc_memory \\ + $renamed_files cat <<-END_VERSIONS > versions.yml "${task.process}": diff --git a/modules/nf-core/fastqc/meta.yml b/modules/nf-core/fastqc/meta.yml index 2b2e62b8a..4827da7af 100644 --- a/modules/nf-core/fastqc/meta.yml +++ b/modules/nf-core/fastqc/meta.yml @@ -11,7 +11,6 @@ tools: FastQC gives general quality metrics about your reads. It provides information about the quality score distribution across your reads, the per base sequence content (%A/C/G/T). - You get information about adapter contamination and other overrepresented sequences. homepage: https://www.bioinformatics.babraham.ac.uk/projects/fastqc/ diff --git a/nextflow.config b/nextflow.config index 112046bd0..0da93484b 100644 --- a/nextflow.config +++ b/nextflow.config @@ -260,8 +260,13 @@ profiles { ] } } - test { includeConfig 'conf/test.config' } - test_full { includeConfig 'conf/test_full.config' } + gpu { + docker.runOptions = '-u $(id -u):$(id -g) --gpus all' + apptainer.runOptions = '--nv' + singularity.runOptions = '--nv' + } + test { includeConfig 'conf/test.config' } + test_full { includeConfig 'conf/test_full.config' } test_full_aws { includeConfig 'conf/test_full.config' } @@ -275,11 +280,14 @@ profiles { } } -// Load nf-core custom profiles from different Institutions -includeConfig !System.getenv('NXF_OFFLINE') && params.custom_config_base ? "${params.custom_config_base}/nfcore_custom.config" : "/dev/null" +// Load nf-core custom profiles from different institutions + +// If params.custom_config_base is set AND either the NXF_OFFLINE environment variable is not set or params.custom_config_base is a local path, the nfcore_custom.config file from the specified base path is included. +// Load nf-core/rnaseq custom profiles from different institutions. +includeConfig params.custom_config_base && (!System.getenv('NXF_OFFLINE') || !params.custom_config_base.startsWith('http')) ? "${params.custom_config_base}/nfcore_custom.config" : "/dev/null" // Load nf-core/rnaseq custom profiles from different institutions. -includeConfig !System.getenv('NXF_OFFLINE') && params.custom_config_base ? 
"${params.custom_config_base}/pipeline/rnaseq.config" : "/dev/null" +includeConfig params.custom_config_base && (!System.getenv('NXF_OFFLINE') || !params.custom_config_base.startsWith('http')) ? "${params.custom_config_base}/pipeline/rnaseq.config" : "/dev/null" // Set default registry for Apptainer, Docker, Podman, Charliecloud and Singularity independent of -profile // Will not be used unless Apptainer / Docker / Podman / Charliecloud / Singularity are enabled diff --git a/nf-test.config b/nf-test.config index 9069ed831..015ad19dc 100644 --- a/nf-test.config +++ b/nf-test.config @@ -1,15 +1,22 @@ config { - // Location of nf-tests + // location for all nf-test tests testsDir "." - // nf-test directory used to create temporary files for each test + // nf-test directory including temporary files for each test workDir System.getenv("NFT_WORKDIR") ?: ".nf-test" - // Location of an optional nextflow.config file specific for executing pipeline tests + // location of an optional nextflow.config file specific for executing tests configFile "tests/nextflow.config" + // ignore tests coming from the nf-core/modules repo + ignore 'modules/nf-core/**/*', 'subworkflows/nf-core/**/*' + + // run all test with defined profile(s) from the main nextflow.config profile "test" + // list of filenames or patterns that should be trigger a full test run + triggers 'nextflow.config', 'nf-test.config', 'conf/test.config', 'tests/nextflow.config', 'tests/.nftignore' + // load the necessary plugins plugins { load "nft-bam@0.4.0" diff --git a/ro-crate-metadata.json b/ro-crate-metadata.json index 2808958e8..f5bac1122 100644 --- a/ro-crate-metadata.json +++ b/ro-crate-metadata.json @@ -22,8 +22,8 @@ "@id": "./", "@type": "Dataset", "creativeWorkStatus": "InProgress", - "datePublished": "2025-01-27T14:47:16+00:00", - "description": "

<h1>\n  <picture>\n    <source media=\"(prefers-color-scheme: dark)\" srcset=\"docs/images/nf-core-rnaseq_logo_dark.png\">\n    <img alt=\"nf-core/rnaseq\" src=\"docs/images/nf-core-rnaseq_logo_light.png\">\n  </picture>\n</h1>

\n\n[![GitHub Actions CI Status](https://github.com/nf-core/rnaseq/actions/workflows/ci.yml/badge.svg)](https://github.com/nf-core/rnaseq/actions/workflows/ci.yml)\n[![GitHub Actions Linting Status](https://github.com/nf-core/rnaseq/actions/workflows/linting.yml/badge.svg)](https://github.com/nf-core/rnaseq/actions/workflows/linting.yml)[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/rnaseq/results)[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.XXXXXXX-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.XXXXXXX)\n[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com)\n\n[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A524.04.2-23aa62.svg)](https://www.nextflow.io/)\n[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\n[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\n[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\n[![Launch on Seqera Platform](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Seqera%20Platform-%234256e7)](https://cloud.seqera.io/launch?pipeline=https://github.com/nf-core/rnaseq)\n\n[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23rnaseq-4A154B?labelColor=000000&logo=slack)](https://nfcore.slack.com/channels/rnaseq)[![Follow on Twitter](http://img.shields.io/badge/twitter-%40nf__core-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/nf_core)[![Follow on Mastodon](https://img.shields.io/badge/mastodon-nf__core-6364ff?labelColor=FFFFFF&logo=mastodon)](https://mstdn.science/@nf_core)[![Watch on YouTube](http://img.shields.io/badge/youtube-nf--core-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/c/nf-core)\n\n## Introduction\n\n**nf-core/rnaseq** is a bioinformatics pipeline that ...\n\n\n\n\n1. Read QC ([`FastQC`](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/))2. Present QC for raw reads ([`MultiQC`](http://multiqc.info/))\n\n## Usage\n\n> [!NOTE]\n> If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how to set-up Nextflow. Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline) with `-profile test` before running the workflow on actual data.\n\n\n\nNow, you can run the pipeline using:\n\n\n\n```bash\nnextflow run nf-core/rnaseq \\\n -profile \\\n --input samplesheet.csv \\\n --outdir \n```\n\n> [!WARNING]\n> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. 
Custom config files including those provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_; see [docs](https://nf-co.re/docs/usage/getting_started/configuration#custom-configuration-files).\n\nFor more details and further functionality, please refer to the [usage documentation](https://nf-co.re/rnaseq/usage) and the [parameter documentation](https://nf-co.re/rnaseq/parameters).\n\n## Pipeline output\n\nTo see the results of an example test run with a full size dataset refer to the [results](https://nf-co.re/rnaseq/results) tab on the nf-core website pipeline page.\nFor more details about the output files and reports, please refer to the\n[output documentation](https://nf-co.re/rnaseq/output).\n\n## Credits\n\nnf-core/rnaseq was originally written by Harshil Patel, Phil Ewels, Rickard Hammar\u00e9n.\n\nWe thank the following people for their extensive assistance in the development of this pipeline:\n\n\n\n## Contributions and Support\n\nIf you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\n\nFor further information or help, don't hesitate to get in touch on the [Slack `#rnaseq` channel](https://nfcore.slack.com/channels/rnaseq) (you can join with [this invite](https://nf-co.re/join/slack)).\n\n## Citations\n\n\n\n\n\n\nAn extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\n\nYou can cite the `nf-core` publication as follows:\n\n> **The nf-core framework for community-curated bioinformatics pipelines.**\n>\n> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\n>\n> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\n", + "datePublished": "2025-06-03T11:02:10+00:00", + "description": "

<h1>\n  <picture>\n    <source media=\"(prefers-color-scheme: dark)\" srcset=\"docs/images/nf-core-rnaseq_logo_dark.png\">\n    <img alt=\"nf-core/rnaseq\" src=\"docs/images/nf-core-rnaseq_logo_light.png\">\n  </picture>\n</h1>

\n\n[![GitHub Actions CI Status](https://github.com/nf-core/rnaseq/actions/workflows/ci.yml/badge.svg)](https://github.com/nf-core/rnaseq/actions/workflows/ci.yml)\n[![GitHub Actions Linting Status](https://github.com/nf-core/rnaseq/actions/workflows/linting.yml/badge.svg)](https://github.com/nf-core/rnaseq/actions/workflows/linting.yml)[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/rnaseq/results)[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.1400710-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.1400710)[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com)\n\n[![Nextflow](https://img.shields.io/badge/version-%E2%89%A524.04.2-green?style=flat&logo=nextflow&logoColor=white&color=%230DC09D&link=https%3A%2F%2Fnextflow.io)](https://www.nextflow.io/)\n[![nf-core template version](https://img.shields.io/badge/nf--core_template-3.3.1-green?style=flat&logo=nfcore&logoColor=white&color=%2324B064&link=https%3A%2F%2Fnf-co.re)](https://github.com/nf-core/tools/releases/tag/3.3.1)\n[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\n[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\n[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\n[![Launch on Seqera Platform](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Seqera%20Platform-%234256e7)](https://cloud.seqera.io/launch?pipeline=https://github.com/nf-core/rnaseq)\n\n[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23rnaseq-4A154B?labelColor=000000&logo=slack)](https://nfcore.slack.com/channels/rnaseq)[![Follow on Bluesky](https://img.shields.io/badge/bluesky-%40nf__core-1185fe?labelColor=000000&logo=bluesky)](https://bsky.app/profile/nf-co.re)[![Follow on Mastodon](https://img.shields.io/badge/mastodon-nf__core-6364ff?labelColor=FFFFFF&logo=mastodon)](https://mstdn.science/@nf_core)[![Watch on YouTube](http://img.shields.io/badge/youtube-nf--core-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/c/nf-core)\n\n## Introduction\n\n**nf-core/rnaseq** is a bioinformatics pipeline that can be used to analyse RNA sequencing data obtained from organisms with a reference genome and annotation. It takes a samplesheet and FASTQ files as input, performs quality control (QC), trimming and (pseudo-)alignment, and produces a gene expression matrix and extensive QC report.\n\n![nf-core/rnaseq metro map](docs/images/nf-core-rnaseq_metro_map_grey_animated.svg)\n\n> In case the image above is not loading, please have a look at the [static version](docs/images/nf-core-rnaseq_metro_map_grey.png).\n\n1. Merge re-sequenced FastQ files ([`cat`](http://www.linfo.org/cat.html))\n2. Auto-infer strandedness by subsampling and pseudoalignment ([`fq`](https://github.com/stjude-rust-labs/fq), [`Salmon`](https://combine-lab.github.io/salmon/))\n3. Read QC ([`FastQC`](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/))\n4. UMI extraction ([`UMI-tools`](https://github.com/CGATOxford/UMI-tools))\n5. 
Adapter and quality trimming ([`Trim Galore!`](https://www.bioinformatics.babraham.ac.uk/projects/trim_galore/))\n6. Removal of genome contaminants ([`BBSplit`](http://seqanswers.com/forums/showthread.php?t=41288))\n7. Removal of ribosomal RNA ([`SortMeRNA`](https://github.com/biocore/sortmerna))\n8. Choice of multiple alignment and quantification routes:\n 1. [`STAR`](https://github.com/alexdobin/STAR) -> [`Salmon`](https://combine-lab.github.io/salmon/)\n 2. [`STAR`](https://github.com/alexdobin/STAR) -> [`RSEM`](https://github.com/deweylab/RSEM)\n 3. [`HiSAT2`](https://ccb.jhu.edu/software/hisat2/index.shtml) -> **NO QUANTIFICATION**\n9. Sort and index alignments ([`SAMtools`](https://sourceforge.net/projects/samtools/files/samtools/))\n10. UMI-based deduplication ([`UMI-tools`](https://github.com/CGATOxford/UMI-tools))\n11. Duplicate read marking ([`picard MarkDuplicates`](https://broadinstitute.github.io/picard/))\n12. Transcript assembly and quantification ([`StringTie`](https://ccb.jhu.edu/software/stringtie/))\n13. Create bigWig coverage files ([`BEDTools`](https://github.com/arq5x/bedtools2/), [`bedGraphToBigWig`](http://hgdownload.soe.ucsc.edu/admin/exe/))\n14. Extensive quality control:\n 1. [`RSeQC`](http://rseqc.sourceforge.net/)\n 2. [`Qualimap`](http://qualimap.bioinfo.cipf.es/)\n 3. [`dupRadar`](https://bioconductor.org/packages/release/bioc/html/dupRadar.html)\n 4. [`Preseq`](http://smithlabresearch.org/software/preseq/)\n 5. [`DESeq2`](https://bioconductor.org/packages/release/bioc/html/DESeq2.html)\n 6. [`Kraken2`](https://ccb.jhu.edu/software/kraken2/) -> [`Bracken`](https://ccb.jhu.edu/software/bracken/) on unaligned sequences; _optional_\n15. Pseudoalignment and quantification ([`Salmon`](https://combine-lab.github.io/salmon/) or ['Kallisto'](https://pachterlab.github.io/kallisto/); _optional_)\n16. Present QC for raw read, alignment, gene biotype, sample similarity, and strand-specificity checks ([`MultiQC`](http://multiqc.info/), [`R`](https://www.r-project.org/))\n\n> **Note**\n> The SRA download functionality has been removed from the pipeline (`>=3.2`) and ported to an independent workflow called [nf-core/fetchngs](https://nf-co.re/fetchngs). You can provide `--nf_core_pipeline rnaseq` when running nf-core/fetchngs to download and auto-create a samplesheet containing publicly available samples that can be accepted directly as input by this pipeline.\n\n> **Warning**\n> Quantification isn't performed if using `--aligner hisat2` due to the lack of an appropriate option to calculate accurate expression estimates from HISAT2 derived genomic alignments. However, you can use this route if you have a preference for the alignment, QC and other types of downstream analysis compatible with the output of HISAT2.\n\n## Usage\n\n> [!NOTE]\n> If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how to set-up Nextflow. 
Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline) with `-profile test` before running the workflow on actual data.\n\nFirst, prepare a samplesheet with your input data that looks as follows:\n\n**samplesheet.csv**:\n\n```csv\nsample,fastq_1,fastq_2,strandedness\nCONTROL_REP1,AEG588A1_S1_L002_R1_001.fastq.gz,AEG588A1_S1_L002_R2_001.fastq.gz,auto\nCONTROL_REP1,AEG588A1_S1_L003_R1_001.fastq.gz,AEG588A1_S1_L003_R2_001.fastq.gz,auto\nCONTROL_REP1,AEG588A1_S1_L004_R1_001.fastq.gz,AEG588A1_S1_L004_R2_001.fastq.gz,auto\n```\n\nEach row represents a fastq file (single-end) or a pair of fastq files (paired end). Rows with the same sample identifier are considered technical replicates and merged automatically. The strandedness refers to the library preparation and will be automatically inferred if set to `auto`.\n\n> [!WARNING]\n> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. Custom config files including those provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_; see [docs](https://nf-co.re/docs/usage/getting_started/configuration#custom-configuration-files).\n\nNow, you can run the pipeline using:\n\n```bash\nnextflow run nf-core/rnaseq \\\n --input \\\n --outdir \\\n --gtf \\\n --fasta \\\n -profile \n```\n\nFor more details and further functionality, please refer to the [usage documentation](https://nf-co.re/rnaseq/usage) and the [parameter documentation](https://nf-co.re/rnaseq/parameters).\n\n## Pipeline output\n\nTo see the results of an example test run with a full size dataset refer to the [results](https://nf-co.re/rnaseq/results) tab on the nf-core website pipeline page.\nFor more details about the output files and reports, please refer to the\n[output documentation](https://nf-co.re/rnaseq/output).\n\nThis pipeline quantifies RNA-sequenced reads relative to genes/transcripts in the genome and normalizes the resulting data. It does not compare the samples statistically in order to assign significance in the form of FDR or P-values. For downstream analyses, the output files from this pipeline can be analysed directly in statistical environments like [R](https://www.r-project.org/), [Julia](https://julialang.org/) or via the [nf-core/differentialabundance](https://github.com/nf-core/differentialabundance/) pipeline.\n\n## Online videos\n\nA short talk about the history, current status and functionality on offer in this pipeline was given by Harshil Patel ([@drpatelh](https://github.com/drpatelh)) on [8th February 2022](https://nf-co.re/events/2022/bytesize-32-nf-core-rnaseq) as part of the nf-core/bytesize series.\n\nYou can find numerous talks on the [nf-core events page](https://nf-co.re/events) from various topics including writing pipelines/modules in Nextflow DSL2, using nf-core tooling, running nf-core pipelines as well as more generic content like contributing to Github. 
Please check them out!\n\n## Credits\n\nThese scripts were originally written for use at the [National Genomics Infrastructure](https://ngisweden.scilifelab.se), part of [SciLifeLab](http://www.scilifelab.se/) in Stockholm, Sweden, by Phil Ewels ([@ewels](https://github.com/ewels)) and Rickard Hammar\u00e9n ([@Hammarn](https://github.com/Hammarn)).\n\nThe pipeline was re-written in Nextflow DSL2 and is primarily maintained by Harshil Patel ([@drpatelh](https://github.com/drpatelh)) from [Seqera Labs, Spain](https://seqera.io/).\n\nThe pipeline workflow diagram was initially designed by Sarah Guinchard ([@G-Sarah](https://github.com/G-Sarah)) and James Fellows Yates ([@jfy133](https://github.com/jfy133)), further modifications where made by Harshil Patel ([@drpatelh](https://github.com/drpatelh)) and Maxime Garcia ([@maxulysse](https://github.com/maxulysse)).\n\nMany thanks to other who have helped out along the way too, including (but not limited to):\n\n- [Alex Peltzer](https://github.com/apeltzer)\n- [Colin Davenport](https://github.com/colindaven)\n- [Denis Moreno](https://github.com/Galithil)\n- [Edmund Miller](https://github.com/edmundmiller)\n- [Gregor Sturm](https://github.com/grst)\n- [Jacki Buros Novik](https://github.com/jburos)\n- [Lorena Pantano](https://github.com/lpantano)\n- [Matthias Zepper](https://github.com/MatthiasZepper)\n- [Maxime Garcia](https://github.com/maxulysse)\n- [Olga Botvinnik](https://github.com/olgabot)\n- [@orzechoj](https://github.com/orzechoj)\n- [Paolo Di Tommaso](https://github.com/pditommaso)\n- [Rob Syme](https://github.com/robsyme)\n\n## Contributions and Support\n\nIf you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\n\nFor further information or help, don't hesitate to get in touch on the [Slack `#rnaseq` channel](https://nfcore.slack.com/channels/rnaseq) (you can join with [this invite](https://nf-co.re/join/slack)).\n\n## Citations\n\nIf you use nf-core/rnaseq for your analysis, please cite it using the following doi: [10.5281/zenodo.1400710](https://doi.org/10.5281/zenodo.1400710)\n\nAn extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\n\nYou can cite the `nf-core` publication as follows:\n\n> **The nf-core framework for community-curated bioinformatics pipelines.**\n>\n> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\n>\n> _Nat Biotechnol._ 2020 Feb 13. 
doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\n", "hasPart": [ { "@id": "main.nf" @@ -99,7 +99,7 @@ }, "mentions": [ { - "@id": "#0bb8ad79-8c56-4c40-8c8f-3f3a759cb012" + "@id": "#75f4967a-2681-475e-96a0-5ce5bc0521b7" } ], "name": "nf-core/rnaseq" @@ -121,31 +121,49 @@ }, { "@id": "main.nf", - "@type": ["File", "SoftwareSourceCode", "ComputationalWorkflow"], + "@type": [ + "File", + "SoftwareSourceCode", + "ComputationalWorkflow" + ], "creator": [ { "@id": "#phil.ewels@scilifelab.se" } ], "dateCreated": "", - "dateModified": "2025-01-27T14:47:16Z", + "dateModified": "2025-06-03T11:02:10Z", "dct:conformsTo": "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/", - "keywords": ["nf-core", "nextflow", "rna", "rna-seq"], - "license": ["MIT"], + "keywords": [ + "nf-core", + "nextflow", + "rna", + "rna-seq" + ], + "license": [ + "MIT" + ], "maintainer": [ { "@id": "#phil.ewels@scilifelab.se" } ], - "name": ["nf-core/rnaseq"], + "name": [ + "nf-core/rnaseq" + ], "programmingLanguage": { "@id": "https://w3id.org/workflowhub/workflow-ro-crate#nextflow" }, "sdPublisher": { "@id": "https://nf-co.re/" }, - "url": ["https://github.com/nf-core/rnaseq", "https://nf-co.re/rnaseq/dev/"], - "version": ["3.17.0dev"] + "url": [ + "https://github.com/nf-core/rnaseq", + "https://nf-co.re/rnaseq/dev/" + ], + "version": [ + "3.17.0dev" + ] }, { "@id": "https://w3id.org/workflowhub/workflow-ro-crate#nextflow", @@ -160,11 +178,11 @@ "version": "!>=24.04.2" }, { - "@id": "#0bb8ad79-8c56-4c40-8c8f-3f3a759cb012", + "@id": "#75f4967a-2681-475e-96a0-5ce5bc0521b7", "@type": "TestSuite", "instance": [ { - "@id": "#23a449b9-016a-4957-a48a-d2dc5dc0b823" + "@id": "#fa6744ce-944a-4de2-9c46-d4e1c31dba4b" } ], "mainEntity": { @@ -173,10 +191,10 @@ "name": "Test suite for nf-core/rnaseq" }, { - "@id": "#23a449b9-016a-4957-a48a-d2dc5dc0b823", + "@id": "#fa6744ce-944a-4de2-9c46-d4e1c31dba4b", "@type": "TestInstance", "name": "GitHub Actions workflow for testing nf-core/rnaseq", - "resource": "repos/nf-core/rnaseq/actions/workflows/ci.yml", + "resource": "repos/nf-core/rnaseq/actions/workflows/nf-test.yml", "runsOn": { "@id": "https://w3id.org/ro/terms/test#GithubService" }, @@ -308,4 +326,4 @@ "name": "Phil Ewels" } ] -} +} \ No newline at end of file diff --git a/subworkflows/local/utils_nfcore_rnaseq_pipeline/main.nf b/subworkflows/local/utils_nfcore_rnaseq_pipeline/main.nf index 320e395b1..47cd88ac7 100644 --- a/subworkflows/local/utils_nfcore_rnaseq_pipeline/main.nf +++ b/subworkflows/local/utils_nfcore_rnaseq_pipeline/main.nf @@ -654,4 +654,3 @@ def rnaseqSummary(monochrome_logs=true, pass_mapped_reads=[:], pass_trimmed_read log.info "-${colors.purple}[$workflow.manifest.name]${colors.red} Pipeline completed with errors${colors.reset}-" } } - diff --git a/tests/.nftignore b/tests/.nftignore index 6cde6d5a3..9b230b8be 100644 --- a/tests/.nftignore +++ b/tests/.nftignore @@ -1,3 +1,4 @@ +.DS_Store fq_lint/*/*.fq_lint.txt bbsplit/*.stats.txt fastqc/*/*.{html,zip} @@ -17,6 +18,7 @@ umitools/*.umi_extract.log {hisat2,star_rsem,star_salmon}/*.{bam,bam.bai} {hisat2,star_rsem,star_salmon}/bigwig/*.{forward,reverse}.bigWig {hisat2,star_rsem,star_salmon}/dupradar/box_plot/*_duprateExpBoxplot.pdf +{hisat2,star_rsem,star_salmon}/dupradar/intercepts_slope/*_intercept_slope.txt {hisat2,star_rsem,star_salmon}/dupradar/histogram/*_expressionHist.pdf {hisat2,star_rsem,star_salmon}/dupradar/scatter_plot/*_duprateExpDens.pdf 
{hisat2,star_rsem,star_salmon}/featurecounts/*.featureCounts.txt.summary diff --git a/tests/default.nf.test.snap b/tests/default.nf.test.snap index 3b6b6b481..6e102a654 100644 --- a/tests/default.nf.test.snap +++ b/tests/default.nf.test.snap @@ -1419,11 +1419,6 @@ "RAP1_UNINDUCED_REP2_dupMatrix.txt:md5,28c30ce734d78d53b1c47c3f87414e4b", "WT_REP1_dupMatrix.txt:md5,faaa9a4d73efb7188bbe4a480c680ea0", "WT_REP2_dupMatrix.txt:md5,02236769150436cf31b7339f612119a5", - "RAP1_IAA_30M_REP1_intercept_slope.txt:md5,c677048855caf5190f29fa5f7137cd79", - "RAP1_UNINDUCED_REP1_intercept_slope.txt:md5,ea1b4323d3bc83759e8a026416bca32b", - "RAP1_UNINDUCED_REP2_intercept_slope.txt:md5,b062f49a5223a3452075062cdd308043", - "WT_REP1_intercept_slope.txt:md5,051e7b02c31dd614d580d8b44e1b6898", - "WT_REP2_intercept_slope.txt:md5,5af16b07a734b73a2c1103b535f9e26e", "RAP1_IAA_30M_REP1.biotype_counts_mqc.tsv:md5,6940e190bb388be56f282aa01e916466", "RAP1_IAA_30M_REP1.biotype_counts_rrna_mqc.tsv:md5,dde2de0cb90e10d0195c726f768e9941", "RAP1_IAA_30M_REP1.featureCounts.txt:md5,2a5c8c10f4371048d7d459411b58b087", diff --git a/tests/featurecounts_group_type.nf.test.snap b/tests/featurecounts_group_type.nf.test.snap index 7774b5eb1..869d56e1a 100644 --- a/tests/featurecounts_group_type.nf.test.snap +++ b/tests/featurecounts_group_type.nf.test.snap @@ -1384,11 +1384,6 @@ "RAP1_UNINDUCED_REP2_dupMatrix.txt:md5,28c30ce734d78d53b1c47c3f87414e4b", "WT_REP1_dupMatrix.txt:md5,faaa9a4d73efb7188bbe4a480c680ea0", "WT_REP2_dupMatrix.txt:md5,02236769150436cf31b7339f612119a5", - "RAP1_IAA_30M_REP1_intercept_slope.txt:md5,c677048855caf5190f29fa5f7137cd79", - "RAP1_UNINDUCED_REP1_intercept_slope.txt:md5,ea1b4323d3bc83759e8a026416bca32b", - "RAP1_UNINDUCED_REP2_intercept_slope.txt:md5,b062f49a5223a3452075062cdd308043", - "WT_REP1_intercept_slope.txt:md5,051e7b02c31dd614d580d8b44e1b6898", - "WT_REP2_intercept_slope.txt:md5,5af16b07a734b73a2c1103b535f9e26e", "RAP1_IAA_30M_REP1.SJ.out.tab:md5,ea95e243278af55534f2c52eb5fff7ee", "RAP1_UNINDUCED_REP1.SJ.out.tab:md5,e548d13942535dc0821f3ec6d9743ec8", "RAP1_UNINDUCED_REP2.SJ.out.tab:md5,1f294365343a1a5e95682792fdb77033", diff --git a/tests/hisat2.nf.test.snap b/tests/hisat2.nf.test.snap index ba6799920..b353a132f 100644 --- a/tests/hisat2.nf.test.snap +++ b/tests/hisat2.nf.test.snap @@ -1207,11 +1207,6 @@ "RAP1_UNINDUCED_REP2_dupMatrix.txt:md5,9a5c2672c5817e930c7b884ada8fd92c", "WT_REP1_dupMatrix.txt:md5,11371da7a087879340c2e7e6842a5d89", "WT_REP2_dupMatrix.txt:md5,5176c7447c4295f94e2683dd9995cea0", - "RAP1_IAA_30M_REP1_intercept_slope.txt:md5,1285c70833b46849b726412858736ed7", - "RAP1_UNINDUCED_REP1_intercept_slope.txt:md5,a2efe7c3cad6f910d5dc208c2825a245", - "RAP1_UNINDUCED_REP2_intercept_slope.txt:md5,117712525fcbd396f77710f4f4b605d9", - "WT_REP1_intercept_slope.txt:md5,df33cbc6c3cb1c85c0c06cdba7df3873", - "WT_REP2_intercept_slope.txt:md5,38fbb93a419e666bc81020602852b1e2", "RAP1_IAA_30M_REP1.biotype_counts_mqc.tsv:md5,cd7494b3bb12295a287f36506638f3c6", "RAP1_IAA_30M_REP1.biotype_counts_rrna_mqc.tsv:md5,dde2de0cb90e10d0195c726f768e9941", "RAP1_IAA_30M_REP1.featureCounts.txt:md5,a60088d7013eebeb240e02457431eedb", diff --git a/tests/min_mapped_reads.nf.test.snap b/tests/min_mapped_reads.nf.test.snap index cd3703da2..627e34c21 100644 --- a/tests/min_mapped_reads.nf.test.snap +++ b/tests/min_mapped_reads.nf.test.snap @@ -1148,9 +1148,6 @@ "RAP1_IAA_30M_REP1_dupMatrix.txt:md5,2e0d518a450bb57801cdd075d4e9c217", "RAP1_UNINDUCED_REP1_dupMatrix.txt:md5,96e2f9e1fc5a22a7d468e6fb4a613370", 
"RAP1_UNINDUCED_REP2_dupMatrix.txt:md5,28c30ce734d78d53b1c47c3f87414e4b", - "RAP1_IAA_30M_REP1_intercept_slope.txt:md5,c677048855caf5190f29fa5f7137cd79", - "RAP1_UNINDUCED_REP1_intercept_slope.txt:md5,ea1b4323d3bc83759e8a026416bca32b", - "RAP1_UNINDUCED_REP2_intercept_slope.txt:md5,b062f49a5223a3452075062cdd308043", "RAP1_IAA_30M_REP1.biotype_counts_mqc.tsv:md5,6940e190bb388be56f282aa01e916466", "RAP1_IAA_30M_REP1.biotype_counts_rrna_mqc.tsv:md5,dde2de0cb90e10d0195c726f768e9941", "RAP1_IAA_30M_REP1.featureCounts.txt:md5,2a5c8c10f4371048d7d459411b58b087", diff --git a/tests/nextflow.config b/tests/nextflow.config index 942f65737..6ea8b2c71 100644 --- a/tests/nextflow.config +++ b/tests/nextflow.config @@ -1,22 +1,17 @@ /* ======================================================================================== - Nextflow config file for running tests + Nextflow config file for running nf-test tests ======================================================================================== */ -params { - // Base directory for nf-core/modules test data - modules_testdata_base_path = 's3://ngi-igenomes/testdata/nf-core/modules/' +// TODO nf-core: Specify any additional parameters here +// Or any resources requirements +params.modules_testdata_base_path = 's3://ngi-igenomes/testdata/nf-core/modules/' +params.pipelines_testdata_base_path = 's3://ngi-igenomes/testdata/nf-core/pipelines/rnaseq/3.15/' +params.hisat2_build_memory = '3.GB' +params.outdir = 'results' - // Base directory for nf-core/rnaseq test data - pipelines_testdata_base_path = 's3://ngi-igenomes/testdata/nf-core/pipelines/rnaseq/3.15/' - - // for hisat2 - hisat2_build_memory = '3.GB' - - // TODO: check if we rather do this or disable publishdir for all processes when testing modules/subworkflows - outdir = 'results' -} +aws.client.anonymous = true // fixes S3 access issues on self-hosted runners // Impose sensible resource limits for testing process { @@ -30,14 +25,3 @@ process { ext.args = null } } - -// Impose same minimum Nextflow version as the pipeline for testing -manifest { - nextflowVersion = '!>=24.04.2' -} - -// Disable all Nextflow reporting options -timeline { enabled = false } -report { enabled = false } -trace { enabled = false } -dag { enabled = false } diff --git a/tests/remove_ribo_rna.nf.test.snap b/tests/remove_ribo_rna.nf.test.snap index a661a42c3..9e63e545f 100644 --- a/tests/remove_ribo_rna.nf.test.snap +++ b/tests/remove_ribo_rna.nf.test.snap @@ -1335,11 +1335,6 @@ "RAP1_UNINDUCED_REP2_dupMatrix.txt:md5,c0bf1135d2d70c5918d87f706c0c54bc", "WT_REP1_dupMatrix.txt:md5,b82c4fed335d03e85c414c91c2efd461", "WT_REP2_dupMatrix.txt:md5,bab18079153627205e5d907d8dfba677", - "RAP1_IAA_30M_REP1_intercept_slope.txt:md5,b21a53b0156b0afdfc28c237cb11218d", - "RAP1_UNINDUCED_REP1_intercept_slope.txt:md5,eccc5ca37855c717d35477e684188695", - "RAP1_UNINDUCED_REP2_intercept_slope.txt:md5,c3900f3bb24cd339d0e0ecf65ae43217", - "WT_REP1_intercept_slope.txt:md5,7db7a898152e8eaab96b1aaf0e567e50", - "WT_REP2_intercept_slope.txt:md5,9bcbc18d0d331fe3c972538456ffd5b2", "RAP1_IAA_30M_REP1.biotype_counts_mqc.tsv:md5,accedae963f399d3b47effd8eda41edb", "RAP1_IAA_30M_REP1.biotype_counts_rrna_mqc.tsv:md5,dde2de0cb90e10d0195c726f768e9941", "RAP1_IAA_30M_REP1.featureCounts.txt:md5,e37ededa12690a8ccae2eb42dac47ed6", diff --git a/tests/skip_trimming.nf.test.snap b/tests/skip_trimming.nf.test.snap index ddbe78bd4..ff5d9021e 100644 --- a/tests/skip_trimming.nf.test.snap +++ b/tests/skip_trimming.nf.test.snap @@ -1199,11 +1199,6 @@ 
"RAP1_UNINDUCED_REP2_dupMatrix.txt:md5,bf065e266566c3fb96dc9319a3ed12cf", "WT_REP1_dupMatrix.txt:md5,04512c8be70c1898d0262935ed5ec5bd", "WT_REP2_dupMatrix.txt:md5,5052efd01f5086d394499b9c4626deb1", - "RAP1_IAA_30M_REP1_intercept_slope.txt:md5,7962ee6ba989f9af6465aaf1101ba13b", - "RAP1_UNINDUCED_REP1_intercept_slope.txt:md5,6c69ccc819c14847eef319b7fa8ccb4f", - "RAP1_UNINDUCED_REP2_intercept_slope.txt:md5,147b051de3f1e5c75821176851d13612", - "WT_REP1_intercept_slope.txt:md5,395cde13da1a90cf4378be2597c4f297", - "WT_REP2_intercept_slope.txt:md5,7ddae43baa3c4e499fbc319b95627c41", "RAP1_IAA_30M_REP1.biotype_counts_mqc.tsv:md5,bc80cee5887507179965623c3dfef4f9", "RAP1_IAA_30M_REP1.biotype_counts_rrna_mqc.tsv:md5,dde2de0cb90e10d0195c726f768e9941", "RAP1_IAA_30M_REP1.featureCounts.txt:md5,b1ed22e51a94a0ca8f030ff4e09295aa", diff --git a/tests/star_rsem.nf.test b/tests/star_rsem.nf.test index 864b0561f..33d05f50a 100644 --- a/tests/star_rsem.nf.test +++ b/tests/star_rsem.nf.test @@ -66,4 +66,3 @@ nextflow_pipeline { } } } - diff --git a/tests/star_rsem.nf.test.snap b/tests/star_rsem.nf.test.snap index 327a12726..cca45e1cd 100644 --- a/tests/star_rsem.nf.test.snap +++ b/tests/star_rsem.nf.test.snap @@ -1224,11 +1224,6 @@ "RAP1_UNINDUCED_REP2_dupMatrix.txt:md5,7d96d6ddf1d12d43837b105865aeaafa", "WT_REP1_dupMatrix.txt:md5,802dd0de10d9118943869239f8659c78", "WT_REP2_dupMatrix.txt:md5,e97a3c8d2e606d7d4b40cd33eb0b96c4", - "RAP1_IAA_30M_REP1_intercept_slope.txt:md5,d280fe126a5e82d24121d8662fd5a161", - "RAP1_UNINDUCED_REP1_intercept_slope.txt:md5,a498640b0f9e710311ebc3eb67cedbd0", - "RAP1_UNINDUCED_REP2_intercept_slope.txt:md5,a1c5346e3bad40546c793b8914a22e7e", - "WT_REP1_intercept_slope.txt:md5,9c2e2b7890427e79c97e65827102d965", - "WT_REP2_intercept_slope.txt:md5,db7a36459f2036de6657e74d2c98a47c", "RAP1_IAA_30M_REP1.biotype_counts_mqc.tsv:md5,5a7a4291e8ff6cc25a4eb72dfdf06b51", "RAP1_IAA_30M_REP1.biotype_counts_rrna_mqc.tsv:md5,dde2de0cb90e10d0195c726f768e9941", "RAP1_IAA_30M_REP1.featureCounts.txt:md5,a8200ec76e7916dc1210447130f77bbf", diff --git a/tests/umi.nf.test.snap b/tests/umi.nf.test.snap index c049fde43..1acd7c176 100644 --- a/tests/umi.nf.test.snap +++ b/tests/umi.nf.test.snap @@ -1397,11 +1397,6 @@ "RAP1_UNINDUCED_REP2_dupMatrix.txt:md5,ee34da4ee0f7b56c710f5df041f88f31", "WT_REP1_dupMatrix.txt:md5,6416d48ec754942e0a5a0c1a81680fe5", "WT_REP2_dupMatrix.txt:md5,60e583c84a0d8b31cb667703c56d6c33", - "RAP1_IAA_30M_REP1_intercept_slope.txt:md5,6c1e7c494f765ff6466ab023d52a1d70", - "RAP1_UNINDUCED_REP1_intercept_slope.txt:md5,93a525d3bc1ebfeddc8f04f3c2238237", - "RAP1_UNINDUCED_REP2_intercept_slope.txt:md5,74f6210123d50c2001f20dad6283e856", - "WT_REP1_intercept_slope.txt:md5,4d428c55745bf9e2832d7fdb76b72088", - "WT_REP2_intercept_slope.txt:md5,d6a47171cb52331c5e6e21671152fb98", "RAP1_IAA_30M_REP1.biotype_counts_mqc.tsv:md5,d0e2b4a2e14fa97ad49c4baacfb1d5e3", "RAP1_IAA_30M_REP1.biotype_counts_rrna_mqc.tsv:md5,dde2de0cb90e10d0195c726f768e9941", "RAP1_IAA_30M_REP1.featureCounts.txt:md5,c90762d8ee1df9219dc643b68e464ca0", @@ -2604,11 +2599,6 @@ "RAP1_UNINDUCED_REP2_dupMatrix.txt:md5,6726b1c5e63db1f6efd2882de701adc1", "WT_REP1_dupMatrix.txt:md5,1d57d6942d0720bddd25ff260dce08fa", "WT_REP2_dupMatrix.txt:md5,b944b0f71ef8a98b5a4d53f4542c8e6f", - "RAP1_IAA_30M_REP1_intercept_slope.txt:md5,a07fbfcbd487003cdd2123cb89209d14", - "RAP1_UNINDUCED_REP1_intercept_slope.txt:md5,38cea811141edcfd28d1e7770628f6c9", - "RAP1_UNINDUCED_REP2_intercept_slope.txt:md5,b897caec80a37ace72c156ed091acbcd", - 
"WT_REP1_intercept_slope.txt:md5,9895ef03e6f9282b11c4a32fe16a25b0", - "WT_REP2_intercept_slope.txt:md5,6c22669508aecfdfe5583c38496db314", "RAP1_IAA_30M_REP1.biotype_counts_mqc.tsv:md5,8433a395e65315feb0f8bfca4a1d1aba", "RAP1_IAA_30M_REP1.biotype_counts_rrna_mqc.tsv:md5,dde2de0cb90e10d0195c726f768e9941", "RAP1_IAA_30M_REP1.featureCounts.txt:md5,1ecf699b693201b29cadd187bb0f2971",