diff --git a/modules/nf-core/deseq2/differential/tests/main.nf.test b/modules/nf-core/deseq2/differential/tests/main.nf.test index 370f05dcd747..b8a63843f13a 100644 --- a/modules/nf-core/deseq2/differential/tests/main.nf.test +++ b/modules/nf-core/deseq2/differential/tests/main.nf.test @@ -12,7 +12,7 @@ nextflow_process { test("mouse - contrasts - matrix") { config './contrasts_matrix.config' - + when { process { """ @@ -24,13 +24,13 @@ nextflow_process { tuple(it, it.variable, it.reference, it.target) } ch_matrix = [ - [id: 'test'], + [id: 'test'], file(expression_test_data_dir + 'SRP254919.samplesheet.csv', checkIfExists: true), file(expression_test_data_dir + 'SRP254919.salmon.merged.gene_counts.top1000cov.tsv', checkIfExists: true) ] ch_spikes = [[],[]] ch_lengths = [[],[]] - + input[0] = ch_contrasts input[1] = ch_matrix input[2] = ch_spikes @@ -52,7 +52,7 @@ nextflow_process { file(process.out.dispersion_plot[0][1]).name, file(process.out.rdata[0][1]).name ).match() }, - { assert path(process.out.session_info[0][1]).text.contains("DESeq2_1.34.0") } + { assert path(process.out.session_info[0][1]).text.contains("DESeq2_1.34.0") } ) } } @@ -72,15 +72,15 @@ nextflow_process { tuple(it, it.variable, it.reference, it.target) } ch_matrix = [ - [id: 'test'], + [id: 'test'], file(expression_test_data_dir + 'SRP254919.samplesheet.csv', checkIfExists: true), file(expression_test_data_dir + 'SRP254919.salmon.merged.gene_counts.top1000cov.tsv', checkIfExists: true) ] ch_lengths = [ - [id: 'test'], + [id: 'test'], file(expression_test_data_dir + 'SRP254919.spoofed_lengths.tsv', checkIfExists: true) ] - + ch_spikes = [[],[]] input[0] = ch_contrasts @@ -105,7 +105,7 @@ nextflow_process { file(process.out.dispersion_plot[0][1]).name, file(process.out.rdata[0][1]).name ).match() }, - { assert path(process.out.session_info[0][1]).text.contains("DESeq2_1.34.0") } + { assert path(process.out.session_info[0][1]).text.contains("DESeq2_1.34.0") } ) } @@ -114,7 +114,7 @@ nextflow_process { test("mouse - contrasts - matrix - no blocking") { config './contrasts_matrix_noblocking.config' - + when { process { """ @@ -126,13 +126,13 @@ nextflow_process { tuple(it, it.variable, it.reference, it.target) } ch_matrix = [ - [id: 'test'], + [id: 'test'], file(expression_test_data_dir + 'SRP254919.samplesheet.csv', checkIfExists: true), file(expression_test_data_dir + 'SRP254919.salmon.merged.gene_counts.top1000cov.tsv', checkIfExists: true) ] ch_spikes = [[],[]] ch_lengths = [[],[]] - + input[0] = ch_contrasts input[1] = ch_matrix input[2] = ch_spikes @@ -154,7 +154,7 @@ nextflow_process { file(process.out.dispersion_plot[0][1]).name, file(process.out.rdata[0][1]).name ).match() }, - { assert path(process.out.session_info[0][1]).text.contains("DESeq2_1.34.0") } + { assert path(process.out.session_info[0][1]).text.contains("DESeq2_1.34.0") } ) } } @@ -162,7 +162,7 @@ nextflow_process { test("mouse - contrasts - matrix - spikes") { config './contrasts_matrix_spikes.config' - + when { process { """ @@ -174,16 +174,16 @@ nextflow_process { tuple(it, it.variable, it.reference, it.target) } ch_matrix = [ - [id: 'test'], + [id: 'test'], file(expression_test_data_dir + 'SRP254919.samplesheet.csv', checkIfExists: true), file(expression_test_data_dir + 'SRP254919.salmon.merged.gene_counts.top1000cov.tsv', checkIfExists: true) ] ch_spikes = [ - ['id':'ERCC'], + ['id':'ERCC'], file(expression_test_data_dir + 'SRP254919.spikes.tsv', checkIfExists: true) ] ch_lengths = [[],[]] - + input[0] = ch_contrasts input[1] = ch_matrix 
input[2] = ch_spikes @@ -205,15 +205,15 @@ nextflow_process { file(process.out.dispersion_plot[0][1]).name, file(process.out.rdata[0][1]).name ).match() }, - { assert path(process.out.session_info[0][1]).text.contains("DESeq2_1.34.0") } + { assert path(process.out.session_info[0][1]).text.contains("DESeq2_1.34.0") } ) } } - + test("mouse - contrasts - matrix - strip spikes") { config './contrasts_matrix.config' - + when { process { """ @@ -225,16 +225,16 @@ nextflow_process { tuple(it, it.variable, it.reference, it.target) } ch_matrix = [ - [id: 'test'], + [id: 'test'], file(expression_test_data_dir + 'SRP254919.samplesheet.csv', checkIfExists: true), file(expression_test_data_dir + 'SRP254919.salmon.merged.gene_counts.top1000cov.tsv', checkIfExists: true) ] ch_spikes = [ - ['id':'ERCC'], + ['id':'ERCC'], file(expression_test_data_dir + 'SRP254919.spikes.tsv', checkIfExists: true) ] ch_lengths = [[],[]] - + input[0] = ch_contrasts input[1] = ch_matrix input[2] = ch_spikes @@ -256,15 +256,15 @@ nextflow_process { file(process.out.dispersion_plot[0][1]).name, file(process.out.rdata[0][1]).name ).match() }, - { assert path(process.out.session_info[0][1]).text.contains("DESeq2_1.34.0") } + { assert path(process.out.session_info[0][1]).text.contains("DESeq2_1.34.0") } ) } } - + test("mouse - contrasts - matrix - csv") { config './contrasts_matrix.config' - + when { process { """ @@ -288,7 +288,7 @@ nextflow_process { } ch_spikes = [[],[]] ch_lengths = [[],[]] - + input[0] = ch_contrasts input[1] = ch_matrix input[2] = ch_spikes @@ -310,15 +310,15 @@ nextflow_process { file(process.out.dispersion_plot[0][1]).name, file(process.out.rdata[0][1]).name ).match() }, - { assert path(process.out.session_info[0][1]).text.contains("DESeq2_1.34.0") } + { assert path(process.out.session_info[0][1]).text.contains("DESeq2_1.34.0") } ) } } - + test("mouse - contrasts - matrix - vst nsub") { config './contrasts_matrix_vst_nsub.config' - + when { process { """ @@ -330,13 +330,13 @@ nextflow_process { tuple(it, it.variable, it.reference, it.target) } ch_matrix = [ - [id: 'test'], + [id: 'test'], file(expression_test_data_dir + 'SRP254919.samplesheet.csv', checkIfExists: true), file(expression_test_data_dir + 'SRP254919.salmon.merged.gene_counts.top1000cov.tsv', checkIfExists: true) ] ch_spikes = [[],[]] ch_lengths = [[],[]] - + input[0] = ch_contrasts input[1] = ch_matrix input[2] = ch_spikes @@ -358,15 +358,15 @@ nextflow_process { file(process.out.dispersion_plot[0][1]).name, file(process.out.rdata[0][1]).name ).match() }, - { assert path(process.out.session_info[0][1]).text.contains("DESeq2_1.34.0") } + { assert path(process.out.session_info[0][1]).text.contains("DESeq2_1.34.0") } ) } } - + test("mouse - contrasts - matrix - subset to contrast") { config './contrasts_matrix_subset_to_contrast.config' - + when { process { """ @@ -378,13 +378,13 @@ nextflow_process { tuple(it, it.variable, it.reference, it.target) } ch_matrix = [ - [id: 'test'], + [id: 'test'], file(expression_test_data_dir + 'SRP254919.samplesheet.csv', checkIfExists: true), file(expression_test_data_dir + 'SRP254919.salmon.merged.gene_counts.top1000cov.tsv', checkIfExists: true) ] ch_spikes = [[],[]] ch_lengths = [[],[]] - + input[0] = ch_contrasts input[1] = ch_matrix input[2] = ch_spikes @@ -406,7 +406,7 @@ nextflow_process { file(process.out.dispersion_plot[0][1]).name, file(process.out.rdata[0][1]).name ).match() }, - { assert path(process.out.session_info[0][1]).text.contains("DESeq2_1.34.0") } + { assert 
path(process.out.session_info[0][1]).text.contains("DESeq2_1.34.0") } ) } } @@ -414,7 +414,7 @@ nextflow_process { test("mouse - contrasts - matrix - exclude samples") { config './contrasts_matrix_exclude_samples.config' - + when { process { """ @@ -426,13 +426,13 @@ nextflow_process { tuple(it, it.variable, it.reference, it.target) } ch_matrix = [ - [id: 'test'], + [id: 'test'], file(expression_test_data_dir + 'SRP254919.samplesheet.csv', checkIfExists: true), file(expression_test_data_dir + 'SRP254919.salmon.merged.gene_counts.top1000cov.tsv', checkIfExists: true) ] ch_spikes = [[],[]] ch_lengths = [[],[]] - + input[0] = ch_contrasts input[1] = ch_matrix input[2] = ch_spikes @@ -454,16 +454,16 @@ nextflow_process { file(process.out.dispersion_plot[0][1]).name, file(process.out.rdata[0][1]).name ).match() }, - { assert path(process.out.session_info[0][1]).text.contains("DESeq2_1.34.0") } + { assert path(process.out.session_info[0][1]).text.contains("DESeq2_1.34.0") } ) } } test("mouse - contrasts - matrix - stub") { config './contrasts_matrix.config' - + options "-stub" - + when { process { """ @@ -475,13 +475,13 @@ nextflow_process { tuple(it, it.variable, it.reference, it.target) } ch_matrix = [ - [id: 'test'], + [id: 'test'], file(expression_test_data_dir + 'SRP254919.samplesheet.csv', checkIfExists: true), file(expression_test_data_dir + 'SRP254919.salmon.merged.gene_counts.top1000cov.tsv', checkIfExists: true) ] ch_spikes = [[],[]] ch_lengths = [[],[]] - + input[0] = ch_contrasts input[1] = ch_matrix input[2] = ch_spikes diff --git a/modules/nf-core/limma/differential/tests/main.nf.test b/modules/nf-core/limma/differential/tests/main.nf.test index 4cdd0014e716..551a53f12d7a 100644 --- a/modules/nf-core/limma/differential/tests/main.nf.test +++ b/modules/nf-core/limma/differential/tests/main.nf.test @@ -220,7 +220,7 @@ nextflow_process { { assert process.success }, { assert snapshot(process.out.model, process.out.versions).match() }, { assert path(process.out.results[0][1]).getText().contains("ENSMUSG00000023978\t-4.89014922224241") }, - { assert path(process.out.results[0][1]).getText().contains("ENSMUSG00000059991\t0.779227180297504") }, + { assert path(process.out.results[0][1]).getText().contains("ENSMUSG00000059991\t0.77922") }, { assert path(process.out.normalised_counts[0][1]).getText().contains("ENSMUSG00000023978\t6.11247620232167") }, { assert path(process.out.normalised_counts[0][1]).getText().contains("ENSMUSG00000059991\t4.52751370160052") } ) @@ -249,7 +249,7 @@ nextflow_process { { assert process.success }, { assert snapshot(process.out.model, process.out.versions).match() }, { assert path(process.out.results[0][1]).getText().contains("ENSMUSG00000023978\t-2.84055986312942") }, - { assert path(process.out.results[0][1]).getText().contains("ENSMUSG00000059991\t0.705508451587681") }, + { assert path(process.out.results[0][1]).getText().contains("ENSMUSG00000059991\t0.7055") }, { assert path(process.out.normalised_counts[0][1]).getText().contains("ENSMUSG00000023978\t6.11247620232167") }, { assert path(process.out.normalised_counts[0][1]).getText().contains("ENSMUSG00000059991\t4.52751370160052") } ) @@ -278,7 +278,7 @@ nextflow_process { { assert process.success }, { assert snapshot(process.out.model, process.out.versions).match() }, { assert path(process.out.results[0][1]).getText().contains("ENSMUSG00000023978\t-2.8363444336503") }, - { assert path(process.out.results[0][1]).getText().contains("ENSMUSG00000059991\t0.736009837885068") }, + { assert 
path(process.out.results[0][1]).getText().contains("ENSMUSG00000059991\t0.7360") }, { assert path(process.out.normalised_counts[0][1]).getText().contains("ENSMUSG00000023978\t6.11247620232167") }, { assert path(process.out.normalised_counts[0][1]).getText().contains("ENSMUSG00000059991\t4.52751370160052") } ) diff --git a/modules/nf-core/propr/propd/meta.yml b/modules/nf-core/propr/propd/meta.yml index 57f1521f15f9..ba7a7a2fefa9 100644 --- a/modules/nf-core/propr/propd/meta.yml +++ b/modules/nf-core/propr/propd/meta.yml @@ -58,13 +58,13 @@ output: - meta: type: file description: | - TSV-format table of genes associated with differential expression + TSV-format table of genes associated with differential expression information as compiled from the propd results pattern: "*.propd.genewise.tsv" - "*.propd.genewise.tsv": type: file description: | - TSV-format table of genes associated with differential expression + TSV-format table of genes associated with differential expression information as compiled from the propd results pattern: "*.propd.genewise.tsv" - genewise_plot: @@ -97,15 +97,15 @@ output: - meta: type: file description: | - (Optional) TSV-format table of the native propd pairwise results. This - table contains the differential proportionality values associated to + (Optional) TSV-format table of the native propd pairwise results. This + table contains the differential proportionality values associated to each pair of genes. pattern: "*.propd.pairwise.tsv" - "*.propd.pairwise.tsv": type: file description: | - (Optional) TSV-format table of the native propd pairwise results. This - table contains the differential proportionality values associated to + (Optional) TSV-format table of the native propd pairwise results. This + table contains the differential proportionality values associated to each pair of genes. pattern: "*.propd.pairwise.tsv" - results_pairwise_filtered: @@ -113,28 +113,28 @@ output: type: file description: | (Optional) TSV-format table of the filtered propd pairwise results. This - table contains the pairs of genes with significant differential + table contains the pairs of genes with significant differential proportionality values. pattern: "*.propd.pairwise_filtered.tsv" - "*.propd.pairwise_filtered.tsv": type: file description: | (Optional) TSV-format table of the filtered propd pairwise results. This - table contains the pairs of genes with significant differential + table contains the pairs of genes with significant differential proportionality values. pattern: "*.propd.pairwise_filtered.tsv" - adjacency: - meta: type: file description: | - (Optional) CSV-format table of the adjacency matrix defining a graph, with + (Optional) CSV-format table of the adjacency matrix defining a graph, with edges (1) associated to pairs of genes that are significantly differentially proportional. pattern: "*.propd.adjacency.csv" - "*.propd.adjacency.csv": type: file description: | - (Optional) CSV-format table of the adjacency matrix defining a graph, with + (Optional) CSV-format table of the adjacency matrix defining a graph, with edges (1) associated to pairs of genes that are significantly differentially proportional. 
pattern: "*.propd.adjacency.csv" diff --git a/modules/nf-core/variancepartition/dream/main.nf b/modules/nf-core/variancepartition/dream/main.nf index c9fa12dabfbe..c3b4a59ff0fd 100644 --- a/modules/nf-core/variancepartition/dream/main.nf +++ b/modules/nf-core/variancepartition/dream/main.nf @@ -1,5 +1,5 @@ process VARIANCEPARTITION_DREAM { - tag "${meta.id} - ${meta.contrast_id}" + tag "${meta.id}" label 'process_single' conda "${moduleDir}/environment.yml" @@ -8,10 +8,12 @@ process VARIANCEPARTITION_DREAM { 'community.wave.seqera.io/library/bioconductor-edger_bioconductor-variancepartition_r-optparse:ba778938d72f30c5' }" input: - tuple val(meta), path(samplesheet), path(counts) + tuple val(meta), val(contrast_variable), val(reference), val(target), val(formula) + tuple val(meta2), path(samplesheet), path(counts) output: tuple val(meta), path("*.dream.results.tsv") , emit: results + tuple val(meta), path("*.dream.model.txt") , emit: model path "versions.yml" , emit: versions when: @@ -21,9 +23,9 @@ process VARIANCEPARTITION_DREAM { template 'dream.R' stub: - prefix = task.ext.prefix ?: "${meta.id}" """ - touch "${meta.contrast_id}.dream.results.tsv" + touch "${meta.id}.dream.results.tsv" + touch "${meta.id}.dream.model.txt" cat <<-END_VERSIONS > versions.yml "${task.process}": diff --git a/modules/nf-core/variancepartition/dream/meta.yml b/modules/nf-core/variancepartition/dream/meta.yml index da5b3a900d4d..48011b8451b7 100644 --- a/modules/nf-core/variancepartition/dream/meta.yml +++ b/modules/nf-core/variancepartition/dream/meta.yml @@ -18,16 +18,30 @@ input: - - meta: type: map description: | - Groovy Map containing run and contrast information. This can be used at the - workflow level to pass optional parameters to the module, e.g. - [ - contrast_id: 'treatment_mCherry_hND6', - contrast_variable: 'treatment', - contrast_reference: 'mCherry', - contrast_target: 'hND6', - formula: '~ treatment + (1 | sample_number)' - ] - Plus mandatory `id` and other fields created along the run. + Groovy Map containing run and contrast information. + - contrast_variable: + type: string + description: | + The column in the sample sheet that should be used to define groups for + comparison + - reference: + type: string + description: | + The value within the contrast_variable column of the sample sheet that + should be used to derive the reference samples + - target: + type: string + description: | + The value within the contrast_variable column of the sample sheet that + should be used to derive the target samples + - formula: + type: string + description: (Mandatory) R formula string used for modeling, e.g. '~ treatment + (1 | sample_number)'. + - - meta2: + type: map + description: | + Groovy map containing study-wide metadata related to the sample sheet + and matrix - samplesheet: type: file description: Sample sheet file containing sample metadata. @@ -35,8 +49,7 @@ input: - counts: type: file description: | - Raw TSV or CSV format expression matrix with count data by row and samples - by column + TSV or CSV format expression matrix with genes as rows and samples as columns. ontologies: [] output: - results: @@ -49,13 +62,25 @@ output: description: | TSV-format table of differential expression information as output by Dream pattern: "*.dream.results.tsv" + ontologies: + - edam: http://edamontology.org/format_3475 # TSV + - model: + - meta: + type: map + description: Metadata map passed through from input. + - "*.dream.model.txt": + type: file + description: | + R model description text file. 
+ pattern: "*.dream.model.txt" ontologies: [] - versions: - versions.yml: type: file description: File containing software versions pattern: "versions.yml" - ontologies: [] + ontologies: + - edam: http://edamontology.org/format_3750 # YAML authors: - "@alanmmobbs03" - "@nschcolnicov" diff --git a/modules/nf-core/variancepartition/dream/templates/dream.R b/modules/nf-core/variancepartition/dream/templates/dream.R index 653fd459004a..3bd8c627989a 100644 --- a/modules/nf-core/variancepartition/dream/templates/dream.R +++ b/modules/nf-core/variancepartition/dream/templates/dream.R @@ -47,12 +47,12 @@ read_delim_flexible <- function(file, header = TRUE, row.names = NULL, check.nam # Options list opt <- list( - output_prefix = "$meta.contrast_id", # Prefix for output files + output_prefix = ifelse('$task.ext.prefix' == 'null', '$meta.id', '$task.ext.prefix'), count_file = "$counts", # File containing raw counts sample_file = "$samplesheet", # File containing sample information - contrast_variable = "$meta.contrast_variable", # Variable for contrast (e.g., "treatment") - contrast_reference = "$meta.contrast_reference",# Reference level for the contrast - contrast_target = "$meta.contrast_target", # Target level for the contrast (e.g., "mCherry") + contrast_variable = "$contrast_variable", # Variable for contrast (e.g., "treatment") + contrast_reference = "$reference", # Reference level for the contrast + contrast_target = "$target", # Target level for the contrast (e.g., "mCherry") sample_id_col = "sample", # Column name for sample IDs threads = "$task.cpus", # Number of threads for multithreading subset_to_contrast_samples = FALSE, # Whether to subset to contrast samples @@ -69,7 +69,7 @@ opt <- list( winsor_tail_p = "0.05,0.1", # Winsor tail probabilities for eBayes ddf = "adaptive", # 'Satterthwaite', 'Kenward-Roger', or 'adaptive' reml = FALSE, - formula = "$meta.formula", # User-specified formula (e.g. "~ + (1 | sample_number)") + formula = "$formula", # User-specified formula (e.g. 
"~ + (1 | sample_number)") apply_voom = FALSE # Whether to apply `voomWithDreamWeights` ) @@ -159,6 +159,9 @@ results <- topTable(fitmm, coef = coef_name, write.table(results, file = paste(opt\$output_prefix, 'dream.results.tsv', sep = '.'), col.names = TRUE, row.names = FALSE, sep = '\t', quote = FALSE ) +# Save model to file +write(deparse(form), file=paste(opt\$output_prefix, 'dream.model.txt', sep = '.')) + ################################################ ################################################ ## VERSIONS FILE ## diff --git a/modules/nf-core/variancepartition/dream/tests/main.nf.test b/modules/nf-core/variancepartition/dream/tests/main.nf.test index 107e238f16fd..319e444dcaa9 100644 --- a/modules/nf-core/variancepartition/dream/tests/main.nf.test +++ b/modules/nf-core/variancepartition/dream/tests/main.nf.test @@ -14,17 +14,13 @@ nextflow_process { when { process { """ - // contrast data + counts + samplesheet - input[0] = Channel.of( - [ - [ - contrast_id: 'treatment_mCherry_hND6', - contrast_variable: 'treatment', - contrast_reference: 'mCherry', - contrast_target: 'hND6', - blocking_factors: null, - formula: null - ], + input[0] = Channel.of(['id': 'treatment_mCherry_hND6', 'variable': 'treatment', 'reference': 'mCherry', 'target': 'hND6', 'blocking_factors':null, 'formula':null]) + .map{ + tuple(it, it.variable, it.reference, it.target, it.formula) + } + + input[1] = Channel.of([ + [ id:'test' ], file("https://raw.githubusercontent.com/nf-core/test-datasets/refs/heads/modules/data/genomics/mus_musculus/rnaseq_expression/SRP254919.samplesheet.csv", checkIfExists: true), file("https://raw.githubusercontent.com/nf-core/test-datasets/refs/heads/modules/data/genomics/mus_musculus/rnaseq_expression/SRP254919.salmon.merged.gene_counts.top1000cov.tsv", checkIfExists: true) ] @@ -45,17 +41,13 @@ nextflow_process { when { process { """ - // contrast data + counts + samplesheet - input[0] = Channel.of( - [ - [ - contrast_id: 'treatment_mCherry_hND6', - contrast_variable: 'treatment', - contrast_reference: 'mCherry', - contrast_target: 'hND6', - blocking_factors: 'sample_number', - formula: '~ treatment + (1 | sample_number)' - ], + input[0] = Channel.of(['id': 'treatment_mCherry_hND6', 'variable': 'treatment', 'reference': 'mCherry', 'target': 'hND6', 'blocking_factors':'sample_number', 'formula':'~ treatment + (1 | sample_number)']) + .map{ + tuple(it, it.variable, it.reference, it.target, it.formula) + } + + input[1] = Channel.of([ + [ id:'test' ], file("https://raw.githubusercontent.com/nf-core/test-datasets/refs/heads/modules/data/genomics/mus_musculus/rnaseq_expression/SRP254919.samplesheet.csv", checkIfExists: true), file("https://raw.githubusercontent.com/nf-core/test-datasets/refs/heads/modules/data/genomics/mus_musculus/rnaseq_expression/SRP254919.salmon.merged.gene_counts.top1000cov.tsv", checkIfExists: true) ] @@ -78,17 +70,13 @@ nextflow_process { when { process { """ - // contrast data + counts + samplesheet - input[0] = Channel.of( - [ - [ - contrast_id: 'treatment_mCherry_hND6', - contrast_variable: 'treatment', - contrast_reference: 'mCherry', - contrast_target: 'hND6', - blocking_factors: 'sample_number', - formula: null - ], + input[0] = Channel.of(['id': 'treatment_mCherry_hND6', 'variable': 'treatment', 'reference': 'mCherry', 'target': 'hND6', 'blocking_factors':'sample_number', 'formula':'~ treatment + (1 | sample_number)']) + .map{ + tuple(it, it.variable, it.reference, it.target, it.formula) + } + + input[1] = Channel.of([ + [ id:'test' ], 
file("https://raw.githubusercontent.com/nf-core/test-datasets/refs/heads/modules/data/genomics/mus_musculus/rnaseq_expression/SRP254919.samplesheet.csv", checkIfExists: true), file("https://raw.githubusercontent.com/nf-core/test-datasets/refs/heads/modules/data/genomics/mus_musculus/rnaseq_expression/SRP254919.salmon.merged.gene_counts.top1000cov.tsv", checkIfExists: true) ] diff --git a/modules/nf-core/variancepartition/dream/tests/main.nf.test.snap b/modules/nf-core/variancepartition/dream/tests/main.nf.test.snap index ca054e3fa0f0..34257f3b24d1 100644 --- a/modules/nf-core/variancepartition/dream/tests/main.nf.test.snap +++ b/modules/nf-core/variancepartition/dream/tests/main.nf.test.snap @@ -5,10 +5,10 @@ "0": [ [ { - "contrast_id": "treatment_mCherry_hND6", - "contrast_variable": "treatment", - "contrast_reference": "mCherry", - "contrast_target": "hND6", + "id": "treatment_mCherry_hND6", + "variable": "treatment", + "reference": "mCherry", + "target": "hND6", "blocking_factors": "sample_number", "formula": "~ treatment + (1 | sample_number)" }, @@ -16,15 +16,41 @@ ] ], "1": [ + [ + { + "id": "treatment_mCherry_hND6", + "variable": "treatment", + "reference": "mCherry", + "target": "hND6", + "blocking_factors": "sample_number", + "formula": "~ treatment + (1 | sample_number)" + }, + "treatment_mCherry_hND6.dream.model.txt:md5,7103206474aa480ffd9cec149263489f" + ] + ], + "2": [ "versions.yml:md5,fc1f26eb2194018e99fc2916332676b7" ], + "model": [ + [ + { + "id": "treatment_mCherry_hND6", + "variable": "treatment", + "reference": "mCherry", + "target": "hND6", + "blocking_factors": "sample_number", + "formula": "~ treatment + (1 | sample_number)" + }, + "treatment_mCherry_hND6.dream.model.txt:md5,7103206474aa480ffd9cec149263489f" + ] + ], "results": [ [ { - "contrast_id": "treatment_mCherry_hND6", - "contrast_variable": "treatment", - "contrast_reference": "mCherry", - "contrast_target": "hND6", + "id": "treatment_mCherry_hND6", + "variable": "treatment", + "reference": "mCherry", + "target": "hND6", "blocking_factors": "sample_number", "formula": "~ treatment + (1 | sample_number)" }, @@ -40,7 +66,7 @@ "nf-test": "0.9.2", "nextflow": "24.10.5" }, - "timestamp": "2025-03-13T14:40:25.867780675" + "timestamp": "2025-03-31T15:03:03.556133605" }, "Mus musculus - expression table - contrasts + blocking factors stub": { "content": [ @@ -48,28 +74,54 @@ "0": [ [ { - "contrast_id": "treatment_mCherry_hND6", - "contrast_variable": "treatment", - "contrast_reference": "mCherry", - "contrast_target": "hND6", + "id": "treatment_mCherry_hND6", + "variable": "treatment", + "reference": "mCherry", + "target": "hND6", "blocking_factors": "sample_number", - "formula": null + "formula": "~ treatment + (1 | sample_number)" }, "treatment_mCherry_hND6.dream.results.tsv:md5,d41d8cd98f00b204e9800998ecf8427e" ] ], "1": [ + [ + { + "id": "treatment_mCherry_hND6", + "variable": "treatment", + "reference": "mCherry", + "target": "hND6", + "blocking_factors": "sample_number", + "formula": "~ treatment + (1 | sample_number)" + }, + "treatment_mCherry_hND6.dream.model.txt:md5,d41d8cd98f00b204e9800998ecf8427e" + ] + ], + "2": [ "versions.yml:md5,03b686ec8c67a91501ebb2b2a5234e77" ], + "model": [ + [ + { + "id": "treatment_mCherry_hND6", + "variable": "treatment", + "reference": "mCherry", + "target": "hND6", + "blocking_factors": "sample_number", + "formula": "~ treatment + (1 | sample_number)" + }, + "treatment_mCherry_hND6.dream.model.txt:md5,d41d8cd98f00b204e9800998ecf8427e" + ] + ], "results": [ [ { - "contrast_id": 
"treatment_mCherry_hND6", - "contrast_variable": "treatment", - "contrast_reference": "mCherry", - "contrast_target": "hND6", + "id": "treatment_mCherry_hND6", + "variable": "treatment", + "reference": "mCherry", + "target": "hND6", "blocking_factors": "sample_number", - "formula": null + "formula": "~ treatment + (1 | sample_number)" }, "treatment_mCherry_hND6.dream.results.tsv:md5,d41d8cd98f00b204e9800998ecf8427e" ] @@ -83,7 +135,7 @@ "nf-test": "0.9.2", "nextflow": "24.10.5" }, - "timestamp": "2025-03-13T14:40:39.315196311" + "timestamp": "2025-03-31T15:03:25.932056996" }, "Mus musculus - expression table - contrasts": { "content": [ diff --git a/subworkflows/nf-core/abundance_differential_filter/main.nf b/subworkflows/nf-core/abundance_differential_filter/main.nf index 75bea40daf8c..86cc060786ea 100644 --- a/subworkflows/nf-core/abundance_differential_filter/main.nf +++ b/subworkflows/nf-core/abundance_differential_filter/main.nf @@ -26,21 +26,22 @@ workflow ABUNDANCE_DIFFERENTIAL_FILTER { ch_samplesheet // [ meta_exp, samplesheet ] ch_transcript_lengths // [ meta_exp, transcript_lengths] ch_control_features // [meta_exp, control_features] - ch_contrasts // [[ meta_contrast, contrast_variable, reference, target ]] - ch_contrasts_dream // [meta_contrast, samplesheet, matrix] + ch_contrasts // [[ meta_contrast, contrast_variable, reference, target, formula ]] main: ch_versions = Channel.empty() // Set up how the channels crossed below will be used to generate channels for processing - def criteria = multiMapCriteria { meta_input, abundance, analysis_method, fc_threshold, stat_threshold, meta_exp, samplesheet, meta_contrasts, variable, reference, target -> + def criteria = multiMapCriteria { meta_input, abundance, analysis_method, fc_threshold, stat_threshold, meta_exp, samplesheet, meta_contrasts, variable, reference, target, formula -> def meta_for_diff = mergeMaps(meta_contrasts, meta_input) + [ 'method_differential': analysis_method ] def meta_input_new = meta_input + [ 'method_differential': analysis_method ] samples_and_matrix: [ meta_input_new, samplesheet, abundance ] contrasts_for_diff: [ meta_for_diff, variable, reference, target ] + contrasts_for_diff_with_formula: + [ meta_for_diff, variable, reference, target, formula ] filter_params: [ meta_for_diff, [ 'fc_threshold': fc_threshold, 'stat_threshold': stat_threshold ]] contrasts_for_norm: @@ -131,6 +132,23 @@ workflow ABUNDANCE_DIFFERENTIAL_FILTER { ch_versions = ch_versions.mix(PROPR_PROPD.out.versions.first()) + // ---------------------------------------------------- + // Run DREAM + // ---------------------------------------------------- + + // DREAM only runs with formula + dream_inputs = inputs.contrasts_for_diff_with_formula + .filter { meta, variable, reference, target, formula -> + meta.method_differential == 'dream' && formula != null + } + + VARIANCEPARTITION_DREAM( + dream_inputs, + inputs.samples_and_matrix.filter{ it[0].method_differential == 'dream' } + ) + + ch_versions = ch_versions.mix( VARIANCEPARTITION_DREAM.out.versions.first() ) + // ---------------------------------------------------- // Collect results // ---------------------------------------------------- @@ -138,12 +156,14 @@ workflow ABUNDANCE_DIFFERENTIAL_FILTER { ch_results = DESEQ2_DIFFERENTIAL.out.results .mix(LIMMA_DIFFERENTIAL.out.results) .mix(PROPR_PROPD.out.results_genewise) + .mix(VARIANCEPARTITION_DREAM.out.results) ch_normalised_matrix = DESEQ2_NORM.out.normalised_counts .mix(LIMMA_NORM.out.normalised_counts) ch_model = 
DESEQ2_DIFFERENTIAL.out.model .mix(LIMMA_DIFFERENTIAL.out.model) + .mix(VARIANCEPARTITION_DREAM.out.model) ch_variance_stabilised_matrix = DESEQ2_NORM.out.rlog_counts .mix(DESEQ2_NORM.out.vst_counts) @@ -168,6 +188,10 @@ workflow ABUNDANCE_DIFFERENTIAL_FILTER { 'propd' : [ fc_column: 'lfc', fc_cardinality: '>=', stat_column: 'weighted_connectivity', stat_cardinality: '>=' + ], + 'dream' : [ + fc_column: 'logFC', fc_cardinality: '>=', + stat_column: 'adj.P.Val', stat_cardinality: '<=' ] ] filter_input: [meta + filter_meta, results] @@ -191,18 +215,10 @@ workflow ABUNDANCE_DIFFERENTIAL_FILTER { ) ch_versions = ch_versions.mix(CUSTOM_FILTERDIFFERENTIALTABLE.out.versions.first()) - ch_dream_results = Channel.empty() - - VARIANCEPARTITION_DREAM(ch_contrasts_dream) - ch_versions = ch_versions.mix( VARIANCEPARTITION_DREAM.out.versions.first() ) - ch_dream_results = VARIANCEPARTITION_DREAM.out.results - - emit: // main results results_genewise = ch_results results_genewise_filtered = CUSTOM_FILTERDIFFERENTIALTABLE.out.filtered - results_dream = ch_dream_results // pairwise results adjacency = PROPR_PROPD.out.adjacency diff --git a/subworkflows/nf-core/abundance_differential_filter/tests/main.nf.test b/subworkflows/nf-core/abundance_differential_filter/tests/main.nf.test index 7bc99257399a..2d7b968dd60a 100644 --- a/subworkflows/nf-core/abundance_differential_filter/tests/main.nf.test +++ b/subworkflows/nf-core/abundance_differential_filter/tests/main.nf.test @@ -21,48 +21,30 @@ nextflow_workflow { when { workflow { """ - // Define test data + def testData = [ expression_test_data_dir: params.modules_testdata_base_path + 'genomics/mus_musculus/rnaseq_expression/', contrasts_file: 'SRP254919.contrasts.csv', - samplesheet_file: 'SRP254919.samplesheet.csv', - dream_matrix_file: 'SRP254919.salmon.merged.gene_counts.top1000cov.tsv' + abundance_file: 'SRP254919.salmon.merged.gene_counts.top1000cov.tsv', + samplesheet_file: 'SRP254919.samplesheet.csv' ] - // Input (not used) - ch_input = Channel.of([ - [ id:'test' ], - file(testData.expression_test_data_dir + 'dummy_counts.tsv'), - 'dream', // analysis method - 1.5, // FC threshold - 0.05 // stat (adjusted p-value) threshold - ]) - - // Define additional workflow inputs (not used) - ch_samplesheet = Channel.of([ + ch_samplesheet = Channel.of([ [ id:'test' ], file(testData.expression_test_data_dir + testData.samplesheet_file) ]) ch_transcript_lengths = Channel.of([ [], [] ]) ch_control_features = Channel.of([ [], [] ]) - ch_contrasts = Channel.fromPath(file(testData.expression_test_data_dir + testData.contrasts_file)) - .splitCsv ( header:true, sep:',' ) + ch_contrasts = Channel.of(['id': 'treatment_mCherry_hND6', 'variable': 'treatment', 'reference': 'mCherry', 'target': 'hND6', 'blocking_factors':'sample_number', 'formula':'~ treatment + (1 | sample_number)']) .map{ - tuple(it, it.variable, it.reference, it.target) + tuple(it, it.variable, it.reference, it.target, it.formula) } - - // Contrasts dream input (used) - ch_dream_input = Channel.of([ - [ id: 'test', - contrast_id: 'treatment_mCherry_hND6', - contrast_variable: 'treatment', - contrast_reference: 'mCherry', - contrast_target: 'hND6', - blocking_factors: 'sample_number', - formula: '~ treatment + (1 | sample_number)' - ], - file(testData.expression_test_data_dir + testData.samplesheet_file), - file(testData.expression_test_data_dir + testData.dream_matrix_file) + ch_input = Channel.of([ + [ id:'test' ], + file(testData.expression_test_data_dir + testData.abundance_file), + 'dream', // analysis 
method + 1.5, // FC threshold + 0.05 // stat (adjusted p-value) threshold ]) input[0] = ch_input @@ -70,7 +52,6 @@ nextflow_workflow { input[2] = ch_transcript_lengths input[3] = ch_control_features input[4] = ch_contrasts - input[5] = ch_dream_input """ } } @@ -78,7 +59,9 @@ nextflow_workflow { assertAll( { assert workflow.success }, { assert snapshot( - workflow.out.results_dream, + workflow.out.results_genewise, + workflow.out.results_genewise_filtered, + workflow.out.model, workflow.out.versions ).match() } ) @@ -110,7 +93,7 @@ nextflow_workflow { ch_contrasts = Channel.fromPath(file(testData.expression_test_data_dir + testData.contrasts_file)) .splitCsv ( header:true, sep:',' ) .map{ - tuple(it, it.variable, it.reference, it.target) + tuple(it, it.variable, it.reference, it.target, it.formula) } ch_input = Channel.of([ [ id:'test' ], @@ -125,7 +108,6 @@ nextflow_workflow { input[2] = ch_transcript_lengths input[3] = ch_control_features input[4] = ch_contrasts - input[5] = Channel.empty() """ } } @@ -195,7 +177,7 @@ nextflow_workflow { ch_control_features = Channel.of([ [], [] ]) ch_contrasts = Channel.of(['id': 'diagnosis_normal_uremia', 'variable': 'diagnosis', 'reference': 'normal', 'target': 'uremia']) .map{ - tuple(it, it.variable, it.reference, it.target) + tuple(it, it.variable, it.reference, it.target, it.formula) } ch_input = AFFY_JUSTRMA.out.expression.map{ meta, file -> [ meta, @@ -210,8 +192,6 @@ nextflow_workflow { input[2] = ch_transcript_lengths input[3] = ch_control_features input[4] = ch_contrasts - input[5] = Channel.empty() - """ } } @@ -255,7 +235,7 @@ nextflow_workflow { ch_contrasts = Channel.fromPath(file(testData.expression_test_data_dir + testData.contrasts_file)) .splitCsv ( header:true, sep:',' ) .map{ - tuple(it, it.variable, it.reference, it.target) + tuple(it, it.variable, it.reference, it.target, it.formula) } ch_input = Channel.of([ [ id:'test' ], @@ -270,7 +250,6 @@ nextflow_workflow { input[2] = ch_transcript_lengths input[3] = ch_control_features input[4] = ch_contrasts - input[5] = Channel.empty() """ } } @@ -317,7 +296,7 @@ nextflow_workflow { ch_contrasts = Channel.fromPath(file(testData.expression_test_data_dir + testData.contrasts_file)) .splitCsv ( header:true, sep:',' ) .map{ - tuple(it, it.variable, it.reference, it.target) + tuple(it, it.variable, it.reference, it.target, it.formula) } ch_input = Channel.of([ [ id:'test' ], @@ -332,7 +311,6 @@ nextflow_workflow { input[2] = ch_transcript_lengths input[3] = ch_control_features input[4] = ch_contrasts - input[5] = Channel.empty() """ } } @@ -376,7 +354,7 @@ nextflow_workflow { ch_contrasts = Channel.fromPath(file(testData.expression_test_data_dir + testData.contrasts_file)) .splitCsv ( header:true, sep:',' ) .map{ - tuple(it, it.variable, it.reference, it.target) + tuple(it, it.variable, it.reference, it.target, it.formula) } ch_input = Channel.of([ [ id:'test' ], @@ -391,7 +369,6 @@ nextflow_workflow { input[2] = ch_transcript_lengths input[3] = ch_control_features input[4] = ch_contrasts - input[5] = Channel.empty() """ } } @@ -433,7 +410,7 @@ nextflow_workflow { ch_contrasts = Channel.fromPath(file(testData.expression_test_data_dir + testData.contrasts_file)) .splitCsv ( header:true, sep:',' ) .map{ - tuple(it, it.variable, it.reference, it.target) + tuple(it, it.variable, it.reference, it.target, it.formula) } ch_input = Channel.of( [ @@ -457,7 +434,6 @@ nextflow_workflow { input[2] = ch_transcript_lengths input[3] = ch_control_features input[4] = ch_contrasts - input[5] = Channel.empty() 
""" } } @@ -503,7 +479,7 @@ nextflow_workflow { ch_contrasts = Channel.fromPath(file(testData.expression_test_data_dir + testData.contrasts_file)) .splitCsv ( header:true, sep:',' ) .map{ - tuple(it, it.variable, it.reference, it.target) + tuple(it, it.variable, it.reference, it.target, it.formula) } ch_input = Channel.of( [ @@ -534,7 +510,6 @@ nextflow_workflow { input[2] = ch_transcript_lengths input[3] = ch_control_features input[4] = ch_contrasts - input[5] = Channel.empty() """ } } @@ -583,7 +558,7 @@ nextflow_workflow { ch_contrasts = Channel.fromPath(file(testData.expression_test_data_dir + testData.contrasts_file)) .splitCsv ( header:true, sep:',' ) .map{ - tuple(it, it.variable, it.reference, it.target) + tuple(it, it.variable, it.reference, it.target, it.formula) } ch_input = Channel.of([ [ id:'test' ], @@ -598,7 +573,6 @@ nextflow_workflow { input[2] = ch_transcript_lengths input[3] = ch_control_features input[4] = ch_contrasts - input[5] = Channel.empty() """ } } diff --git a/subworkflows/nf-core/abundance_differential_filter/tests/main.nf.test.snap b/subworkflows/nf-core/abundance_differential_filter/tests/main.nf.test.snap index e2dd9c911219..0bd9aedbf07f 100644 --- a/subworkflows/nf-core/abundance_differential_filter/tests/main.nf.test.snap +++ b/subworkflows/nf-core/abundance_differential_filter/tests/main.nf.test.snap @@ -330,18 +330,49 @@ [ [ { - "id": "test", - "contrast_id": "treatment_mCherry_hND6", - "contrast_variable": "treatment", - "contrast_reference": "mCherry", - "contrast_target": "hND6", + "id": "treatment_mCherry_hND6_test", + "variable": "treatment", + "reference": "mCherry", + "target": "hND6", "blocking_factors": "sample_number", - "formula": "~ treatment + (1 | sample_number)" + "formula": "~ treatment + (1 | sample_number)", + "method_differential": "dream" }, - "treatment_mCherry_hND6.dream.results.tsv:md5,37742ab4ac3152e34339ef423ab1a0a3" + "treatment_mCherry_hND6_test.dream.results.tsv:md5,37742ab4ac3152e34339ef423ab1a0a3" ] ], [ + [ + { + "id": "treatment_mCherry_hND6_test", + "variable": "treatment", + "reference": "mCherry", + "target": "hND6", + "blocking_factors": "sample_number", + "formula": "~ treatment + (1 | sample_number)", + "method_differential": "dream", + "fc_threshold": 1.5, + "stat_threshold": 0.05 + }, + "treatment_mCherry_hND6_test_filtered.tsv:md5,9642188f60e8b93abe55a85e99f3dedc" + ] + ], + [ + [ + { + "id": "treatment_mCherry_hND6_test", + "variable": "treatment", + "reference": "mCherry", + "target": "hND6", + "blocking_factors": "sample_number", + "formula": "~ treatment + (1 | sample_number)", + "method_differential": "dream" + }, + "treatment_mCherry_hND6_test.dream.model.txt:md5,7103206474aa480ffd9cec149263489f" + ] + ], + [ + "versions.yml:md5,1c02d4e455e8f3809c8ce37bee947690", "versions.yml:md5,736da31f06f854355d45aeb9d9c874e0" ] ], @@ -349,7 +380,7 @@ "nf-test": "0.9.2", "nextflow": "24.10.5" }, - "timestamp": "2025-03-13T14:51:50.020742089" + "timestamp": "2025-03-31T18:16:12.171347414" }, "stub": { "content": [ @@ -410,9 +441,6 @@ ], "3": [ - - ], - "4": [ [ { "id": "test", @@ -421,7 +449,7 @@ "test.normalised_counts.tsv:md5,d41d8cd98f00b204e9800998ecf8427e" ] ], - "5": [ + "4": [ [ { "id": "test", @@ -432,7 +460,7 @@ ] ] ], - "6": [ + "5": [ [ { "id": "treatment_mCherry_hND6__test", @@ -456,7 +484,7 @@ "treatment_mCherry_hND6_sample_number_test.deseq2.model.txt:md5,d41d8cd98f00b204e9800998ecf8427e" ] ], - "7": [ + "6": [ "versions.yml:md5,05e3901f6d78f8839a7e07f422e9bc03", 
"versions.yml:md5,1d567f203085b6ae7b621d5587260a23", "versions.yml:md5,50cd86004ca6259274b10316b1b96f00" @@ -496,9 +524,6 @@ }, "test.normalised_counts.tsv:md5,d41d8cd98f00b204e9800998ecf8427e" ] - ], - "results_dream": [ - ], "results_genewise": [ [ @@ -574,7 +599,7 @@ "nf-test": "0.9.2", "nextflow": "24.10.5" }, - "timestamp": "2025-03-11T17:52:36.213485364" + "timestamp": "2025-03-31T18:43:26.944581418" }, "deseq2 - mouse - basic": { "content": [ diff --git a/subworkflows/nf-core/differential_functional_enrichment/main.nf b/subworkflows/nf-core/differential_functional_enrichment/main.nf index 802812a5d938..e219a7d0dafc 100644 --- a/subworkflows/nf-core/differential_functional_enrichment/main.nf +++ b/subworkflows/nf-core/differential_functional_enrichment/main.nf @@ -26,7 +26,7 @@ workflow DIFFERENTIAL_FUNCTIONAL_ENRICHMENT { // other - for the moment these files are only needed for GSEA // as it is the only one that takes expression data as input // if in the future this setting is changed, this section could be removed - ch_contrasts // [ meta_contrast, contrast_variable, reference, target ] + ch_contrasts // [ meta_contrast, contrast_variable, reference, target, formula ] ch_samplesheet // [ meta_exp, samples sheet ] ch_featuresheet // [ meta_exp, features sheet, features id, features symbol ] @@ -53,7 +53,7 @@ workflow DIFFERENTIAL_FUNCTIONAL_ENRICHMENT { // In the case of GSEA, it needs additional files coming from other channels that other methods don't use // here we define the input channel for the GSEA section - def criteria = multiMapCriteria { meta_input, input, genesets, meta_exp, samplesheet, featuresheet, features_id, features_symbol, meta_contrasts, variable, reference, target -> + def criteria = multiMapCriteria { meta_input, input, genesets, meta_exp, samplesheet, featuresheet, features_id, features_symbol, meta_contrasts, variable, reference, target, _formula -> def meta_contrasts_new = meta_contrasts + [ 'variable': variable, 'reference': reference, 'target': target ] // make sure variable, reference, target are in the meta def meta_all = mergeMaps(meta_contrasts_new, meta_input) input: diff --git a/subworkflows/nf-core/differential_functional_enrichment/tests/main.nf.test b/subworkflows/nf-core/differential_functional_enrichment/tests/main.nf.test index d71e7a508eb0..145688c46687 100644 --- a/subworkflows/nf-core/differential_functional_enrichment/tests/main.nf.test +++ b/subworkflows/nf-core/differential_functional_enrichment/tests/main.nf.test @@ -37,7 +37,7 @@ nextflow_workflow { ]) input[0] = ch_input - input[1] = Channel.of([[], [], [], []]) + input[1] = Channel.of([[], [], [], [], []]) input[2] = Channel.of([[], []]) input[3] = Channel.of([[], [], [], []]) """ @@ -85,7 +85,7 @@ nextflow_workflow { ch_contrasts = Channel.fromPath(file(testData.expression_test_data_dir + testData.contrasts_file)) .splitCsv ( header:true, sep:',' ) .map{ - tuple(it, it.variable, it.reference, it.target) + tuple(it, it.variable, it.reference, it.target, it.formula) } ch_input = Channel.of([ [ id:'test' ], @@ -100,7 +100,6 @@ nextflow_workflow { input[2] = ch_transcript_lengths input[3] = ch_control_features input[4] = ch_contrasts - input[5] = Channel.empty() """ } } @@ -115,7 +114,7 @@ nextflow_workflow { } input[0] = ch_input - input[1] = Channel.of([[], [], [], []]) + input[1] = Channel.of([[], [], [], [], []]) input[2] = Channel.of([[], []]) input[3] = Channel.of([[], [], [], []]) """ @@ -163,7 +162,7 @@ nextflow_workflow { ch_contrasts = 
Channel.fromPath(file(testData.expression_test_data_dir + testData.contrasts_file)) .splitCsv ( header:true, sep:',' ) .map{ - tuple(it, it.variable, it.reference, it.target) + tuple(it, it.variable, it.reference, it.target, it.formula) } ch_input = Channel.of([ [ id:'test' ], @@ -178,7 +177,6 @@ nextflow_workflow { input[2] = ch_transcript_lengths input[3] = ch_control_features input[4] = ch_contrasts - input[5] = Channel.empty() """ } } @@ -205,7 +203,7 @@ nextflow_workflow { ch_contrasts = Channel.fromPath(file(testData.contrasts_file)) .splitCsv ( header:true, sep:',' ) .map{ - tuple(it, it.variable, it.reference, it.target) + tuple(it, it.variable, it.reference, it.target, it.formula) } ch_samplesheet = Channel.of([ [ id:'test' ], @@ -265,7 +263,7 @@ nextflow_workflow { ch_contrasts = Channel.fromPath(file(testData.expression_test_data_dir + testData.contrasts_file)) .splitCsv ( header:true, sep:',' ) .map{ - tuple(it, it.variable, it.reference, it.target) + tuple(it, it.variable, it.reference, it.target, it.formula) } ch_input = Channel.of([ [ id:'test' ], @@ -280,7 +278,6 @@ nextflow_workflow { input[2] = ch_transcript_lengths input[3] = ch_control_features input[4] = ch_contrasts - input[5] = Channel.empty() """ } } @@ -296,7 +293,7 @@ nextflow_workflow { } input[0] = ch_input - input[1] = Channel.of([[], [], [], []]) + input[1] = Channel.of([[], [], [], [], []]) input[2] = Channel.of([[], []]) input[3] = Channel.of([[], [], [], []]) """ @@ -342,7 +339,7 @@ nextflow_workflow { ch_contrasts = Channel.fromPath(file(testData.expression_test_data_dir + testData.contrasts_file)) .splitCsv ( header:true, sep:',' ) .map{ - tuple(it, it.variable, it.reference, it.target) + tuple(it, it.variable, it.reference, it.target, it.formula) } ch_input = Channel.of( [ @@ -373,7 +370,6 @@ nextflow_workflow { input[2] = ch_transcript_lengths input[3] = ch_control_features input[4] = ch_contrasts - input[5] = Channel.empty() """ } } @@ -410,7 +406,7 @@ nextflow_workflow { ch_contrasts = Channel.fromPath(file(testData.contrasts_file)) .splitCsv ( header:true, sep:',' ) .map{ - tuple(it, it.variable, it.reference, it.target) + tuple(it, it.variable, it.reference, it.target, it.formula) } ch_samplesheet = Channel.of([ [ id:'test' ], @@ -464,7 +460,7 @@ nextflow_workflow { ]) input[0] = ch_input - input[1] = Channel.of([[], [], [], []]) + input[1] = Channel.of([[], [], [], [], []]) input[2] = Channel.of([[], []]) input[3] = Channel.of([[], [], [], []]) """
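(Editor's summary of the interface change exercised throughout these tests: contrasts now carry an optional formula as a fifth tuple element — dream only runs for rows where the method is 'dream' and the formula is non-null — and the former sixth input channel, ch_contrasts_dream, is gone. A sketch of the resulting subworkflow call, with channel contents mirroring the tests above; file paths and thresholds are illustrative only:

    // Contrast rows: 'formula' is optional and may be null for methods that ignore it
    ch_contrasts = Channel.fromPath('SRP254919.contrasts.csv')
        .splitCsv(header: true, sep: ',')
        .map { row -> tuple(row, row.variable, row.reference, row.target, row.formula) }

    ch_samplesheet        = Channel.of([ [ id: 'test' ], file('SRP254919.samplesheet.csv') ])
    ch_transcript_lengths = Channel.of([ [], [] ])
    ch_control_features   = Channel.of([ [], [] ])

    // Per-matrix settings: the analysis method ('dream' here) travels with the abundance input
    ch_input = Channel.of([
        [ id: 'test' ],
        file('SRP254919.salmon.merged.gene_counts.top1000cov.tsv'),
        'dream', // analysis method
        1.5,     // fold-change threshold
        0.05     // adjusted p-value threshold
    ])

    ABUNDANCE_DIFFERENTIAL_FILTER (
        ch_input,
        ch_samplesheet,
        ch_transcript_lengths,
        ch_control_features,
        ch_contrasts // fifth and final input; no separate dream channel any more
    )
    // results_genewise, results_genewise_filtered and model now also carry the dream outputs
)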