diff --git a/main.nf b/main.nf index 4b3b79ac8..c208bc9ec 100755 --- a/main.nf +++ b/main.nf @@ -9,11 +9,7 @@ ---------------------------------------------------------------------------------------- */ -/* -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - GENOME PARAMETER VALUES -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -*/ +nextflow.preview.output = true params.fasta = getGenomeAttribute('fasta') params.additional_fasta = getGenomeAttribute('additional_fasta') @@ -29,32 +25,25 @@ params.hisat2_index = getGenomeAttribute('hisat2') params.salmon_index = getGenomeAttribute('salmon') params.kallisto_index = getGenomeAttribute('kallisto') -/* -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - IMPORT FUNCTIONS / MODULES / SUBWORKFLOWS / WORKFLOWS -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -*/ - include { RNASEQ } from './workflows/rnaseq' include { PREPARE_GENOME } from './subworkflows/local/prepare_genome' include { PIPELINE_INITIALISATION } from './subworkflows/local/utils_nfcore_rnaseq_pipeline' include { PIPELINE_COMPLETION } from './subworkflows/local/utils_nfcore_rnaseq_pipeline' include { checkMaxContigSize } from './subworkflows/local/utils_nfcore_rnaseq_pipeline' -/* -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - NAMED WORKFLOWS FOR PIPELINE -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -*/ - -// -// WORKFLOW: Run main analysis pipeline -// -workflow NFCORE_RNASEQ { +workflow { main: - - ch_versions = Channel.empty() + // + // SUBWORKFLOW: Run initialisation tasks + // + PIPELINE_INITIALISATION ( + params.version, + params.validate_params, + params.monochrome_logs, + args, + params.outdir + ) // // SUBWORKFLOW: Prepare reference genome files @@ -86,7 +75,6 @@ workflow NFCORE_RNASEQ { params.skip_alignment, params.skip_pseudo_alignment ) - ch_versions = ch_versions.mix(PREPARE_GENOME.out.versions) // Check if contigs in genome fasta file > 512 Mbp if (!params.skip_alignment && !params.bam_csi_index) { @@ -96,13 +84,33 @@ workflow NFCORE_RNASEQ { .map { checkMaxContigSize(it) } } + ch_genome = Channel.empty().mix( + PREPARE_GENOME.out.fasta, + PREPARE_GENOME.out.gtf, + PREPARE_GENOME.out.gff, + PREPARE_GENOME.out.add_fasta, + PREPARE_GENOME.out.gene_bed, + PREPARE_GENOME.out.transcript_fasta, + PREPARE_GENOME.out.fai, + PREPARE_GENOME.out.chrom_sizes, + ) + + ch_genome_index = Channel.empty().mix( + PREPARE_GENOME.out.splicesites, + PREPARE_GENOME.out.bbsplit_index, + PREPARE_GENOME.out.star_index, + PREPARE_GENOME.out.rsem_index, + PREPARE_GENOME.out.hisat2_index, + PREPARE_GENOME.out.salmon_index, + PREPARE_GENOME.out.kallisto_index, + ) + // // WORKFLOW: Run nf-core/rnaseq workflow // ch_samplesheet = Channel.value(file(params.input, checkIfExists: true)) RNASEQ ( ch_samplesheet, - ch_versions, PREPARE_GENOME.out.fasta, PREPARE_GENOME.out.gtf, PREPARE_GENOME.out.fai, @@ -119,40 +127,6 @@ workflow NFCORE_RNASEQ { PREPARE_GENOME.out.sortmerna_index, PREPARE_GENOME.out.splicesites ) - ch_versions = ch_versions.mix(RNASEQ.out.versions) - - emit: - trim_status = RNASEQ.out.trim_status // channel: [id, boolean] - map_status = RNASEQ.out.map_status // channel: [id, boolean] - strand_status = RNASEQ.out.strand_status // channel: [id, boolean] - multiqc_report = RNASEQ.out.multiqc_report // channel: /path/to/multiqc_report.html - 
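The entry workflow above funnels the PREPARE_GENOME reference outputs into two channels (ch_genome, ch_genome_index) purely so they can be bound to named publish targets further down. A minimal, self-contained sketch of that pattern under the workflow output definition preview; the file names and the save_reference default are placeholders rather than pipeline code:

    nextflow.preview.output = true

    params.save_reference = true    // placeholder default, standing in for the pipeline parameter

    workflow {
        main:
        // stand-ins for the PREPARE_GENOME outputs that get mixed together above
        ch_fasta  = Channel.value(file('genome.fa'))
        ch_gtf    = Channel.value(file('genome.gtf'))
        ch_genome = Channel.empty().mix(ch_fasta, ch_gtf)

        publish:
        genome = ch_genome
    }

    output {
        genome {
            enabled params.save_reference   // only written when references are being kept
            path 'genome'                   // published under <outputDir>/genome
        }
    }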
versions = ch_versions // channel: [version1, version2, ...] -} - -/* -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - RUN MAIN WORKFLOW -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -*/ - -workflow { - - main: - // - // SUBWORKFLOW: Run initialisation tasks - // - PIPELINE_INITIALISATION ( - params.version, - params.validate_params, - params.monochrome_logs, - args, - params.outdir - ) - - // - // WORKFLOW: Run main workflow - // - NFCORE_RNASEQ () // // SUBWORKFLOW: Run completion tasks @@ -164,18 +138,68 @@ workflow { params.outdir, params.monochrome_logs, params.hook_url, - NFCORE_RNASEQ.out.multiqc_report, - NFCORE_RNASEQ.out.trim_status, - NFCORE_RNASEQ.out.map_status, - NFCORE_RNASEQ.out.strand_status + RNASEQ.out.multiqc_report, + RNASEQ.out.trim_status, + RNASEQ.out.map_status, + RNASEQ.out.strand_status ) + + publish: + genome = ch_genome + genome_index = ch_genome_index + star_salmon = RNASEQ.out.star_salmon + star_salmon_deseq_qc = RNASEQ.out.star_salmon_deseq_qc + star_rsem = RNASEQ.out.star_rsem + star_rsem_deseq_qc = RNASEQ.out.star_rsem_deseq_qc + hisat2 = RNASEQ.out.hisat2 + multiqc_report = RNASEQ.out.multiqc_report + multiqc_data = RNASEQ.out.multiqc_data + multiqc_plots = RNASEQ.out.multiqc_plots } -/* -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - FUNCTIONS -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -*/ +output { + genome { + enabled params.save_reference + path 'genome' + } + + genome_index { + enabled params.save_reference + path 'genome/index' + } + + star_salmon { + path 'star_salmon' + } + + star_salmon_deseq_qc { + path 'star_salmon/deseq2_qc' + } + + star_rsem { + path 'star_rsem' + } + + star_rsem_deseq_qc { + path 'star_rsem/deseq2_qc' + } + + hisat2 { + path 'hisat2' + } + + multiqc_report { + path params.skip_alignment ? 'multiqc' : "multiqc/${params.aligner}" + } + + multiqc_data { + path params.skip_alignment ? 'multiqc' : "multiqc/${params.aligner}" + } + + multiqc_plots { + path params.skip_alignment ? 'multiqc' : "multiqc/${params.aligner}" + } +} // // Get attribute from genome config file e.g. fasta @@ -189,9 +213,3 @@ def getGenomeAttribute(attribute) { } return null } - -/* -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - THE END -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -*/ diff --git a/modules/nf-core/multiqc/main.nf b/modules/nf-core/multiqc/main.nf index 0ac3c3699..59ba52927 100644 --- a/modules/nf-core/multiqc/main.nf +++ b/modules/nf-core/multiqc/main.nf @@ -18,7 +18,8 @@ process MULTIQC { path "*multiqc_report.html", emit: report path "*_data" , emit: data path "*_plots" , optional:true, emit: plots - path "versions.yml" , emit: versions + + tuple val("${task.process}"), val('multiqc'), eval('multiqc --version | sed -e "s/multiqc, version //g"'), topic: versions when: task.ext.when == null || task.ext.when @@ -42,11 +43,6 @@ process MULTIQC { $replace \\ $samples \\ . 
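The tuple output above replaces the versions.yml heredoc removed just below: the process name, the tool name and an eval()-captured version string go straight to the shared 'versions' topic, which workflows/rnaseq/main.nf later drains with Channel.topic('versions'). A minimal sketch of the pattern, assuming a Nextflow release with topic channels and eval outputs; the FOO process and its command are hypothetical:

    process FOO {
        input:
        path reads

        output:
        path 'out.txt', emit: result
        // process name, tool name, and version string evaluated after the task completes
        tuple val("${task.process}"), val('foo'), eval('foo --version'), topic: versions

        script:
        """
        foo ${reads} > out.txt
        """
    }

    workflow {
        FOO ( Channel.fromPath('reads.fastq.gz') )

        // every `topic: versions` output from every process lands in this one channel
        Channel.topic('versions').view()
    }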
- - cat <<-END_VERSIONS > versions.yml - "${task.process}": - multiqc: \$( multiqc --version | sed -e "s/multiqc, version //g" ) - END_VERSIONS """ stub: @@ -54,10 +50,5 @@ process MULTIQC { mkdir multiqc_data mkdir multiqc_plots touch multiqc_report.html - - cat <<-END_VERSIONS > versions.yml - "${task.process}": - multiqc: \$( multiqc --version | sed -e "s/multiqc, version //g" ) - END_VERSIONS """ } diff --git a/modules/nf-core/multiqc/nextflow.config b/modules/nf-core/multiqc/nextflow.config index 0714dbccd..9b1c528b8 100644 --- a/modules/nf-core/multiqc/nextflow.config +++ b/modules/nf-core/multiqc/nextflow.config @@ -5,14 +5,6 @@ if (!params.skip_multiqc) { ((params.multiqc_title == null) ? '' : "--title \"${params.multiqc_title}\"") ].join(' ').trim() } ext.prefix = "multiqc_report" - publishDir = [ - path: { [ - "${params.outdir}/multiqc", - params.skip_alignment? '' : "/${params.aligner}" - ].join('') }, - mode: params.publish_dir_mode, - saveAs: { filename -> filename.equals('versions.yml') ? null : filename } - ] } } } diff --git a/nextflow.config b/nextflow.config index 1755be5bc..9688eba72 100644 --- a/nextflow.config +++ b/nextflow.config @@ -142,13 +142,8 @@ params { } // Default publishing logic for pipeline -process { - publishDir = [ - path: { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" }, - mode: params.publish_dir_mode, - saveAs: { filename -> filename.equals('versions.yml') ? null : filename } - ] -} +outputDir = params.outdir +workflow.output.mode = params.publish_dir_mode // Load base.config by default for all pipelines includeConfig 'conf/base.config' diff --git a/subworkflows/local/align_star/main.nf b/subworkflows/local/align_star/main.nf index 4f516e8e0..da5a32760 100644 --- a/subworkflows/local/align_star/main.nf +++ b/subworkflows/local/align_star/main.nf @@ -19,8 +19,6 @@ workflow ALIGN_STAR { main: - ch_versions = Channel.empty() - // // Map reads with STAR // @@ -42,7 +40,6 @@ workflow ALIGN_STAR { ch_bam_transcript = STAR_ALIGN_IGENOMES.out.bam_transcript ch_fastq = STAR_ALIGN_IGENOMES.out.fastq ch_tab = STAR_ALIGN_IGENOMES.out.tab - ch_versions = ch_versions.mix(STAR_ALIGN_IGENOMES.out.versions.first()) } else { STAR_ALIGN ( reads, index, gtf, star_ignore_sjdbgtf, seq_platform, seq_center ) ch_orig_bam = STAR_ALIGN.out.bam @@ -53,14 +50,12 @@ workflow ALIGN_STAR { ch_bam_transcript = STAR_ALIGN.out.bam_transcript ch_fastq = STAR_ALIGN.out.fastq ch_tab = STAR_ALIGN.out.tab - ch_versions = ch_versions.mix(STAR_ALIGN.out.versions.first()) } // // Sort, index BAM file and run samtools stats, flagstat and idxstats // BAM_SORT_STATS_SAMTOOLS ( ch_orig_bam, fasta ) - ch_versions = ch_versions.mix(BAM_SORT_STATS_SAMTOOLS.out.versions) emit: orig_bam = ch_orig_bam // channel: [ val(meta), bam ] @@ -79,5 +74,4 @@ workflow ALIGN_STAR { flagstat = BAM_SORT_STATS_SAMTOOLS.out.flagstat // channel: [ val(meta), [ flagstat ] ] idxstats = BAM_SORT_STATS_SAMTOOLS.out.idxstats // channel: [ val(meta), [ idxstats ] ] - versions = ch_versions // channel: [ versions.yml ] } diff --git a/subworkflows/local/prepare_genome/main.nf b/subworkflows/local/prepare_genome/main.nf index f4c82778e..44aa9b22f 100644 --- a/subworkflows/local/prepare_genome/main.nf +++ b/subworkflows/local/prepare_genome/main.nf @@ -65,8 +65,6 @@ workflow PREPARE_GENOME { skip_pseudo_alignment // boolean: Skip all of the pseudoalignment-based processes within the pipeline main: - // Versions collector - ch_versions = Channel.empty() //--------------------------- // 1) 
Uncompress GTF or GFF -> GTF @@ -75,20 +73,16 @@ workflow PREPARE_GENOME { if (gtf) { if (gtf.endsWith('.gz')) { ch_gtf = GUNZIP_GTF ([ [:], file(gtf, checkIfExists: true) ]).gunzip.map { it[1] } - ch_versions = ch_versions.mix(GUNZIP_GTF.out.versions) } else { ch_gtf = Channel.value(file(gtf, checkIfExists: true)) } } else if (gff) { - def ch_gff if (gff.endsWith('.gz')) { ch_gff = GUNZIP_GFF ([ [:], file(gff, checkIfExists: true) ]).gunzip - ch_versions = ch_versions.mix(GUNZIP_GFF.out.versions) } else { ch_gff = Channel.value(file(gff, checkIfExists: true)).map { [ [:], it ] } } ch_gtf = GFFREAD(ch_gff, []).gtf.map { it[1] } - ch_versions = ch_versions.mix(GFFREAD.out.versions) } //------------------------------------- @@ -101,7 +95,6 @@ workflow PREPARE_GENOME { // Uncompress FASTA if needed if (fasta.endsWith('.gz')) { ch_fasta = GUNZIP_FASTA ([ [:], file(fasta, checkIfExists: true) ]).gunzip.map { it[1] } - ch_versions = ch_versions.mix(GUNZIP_FASTA.out.versions) } else { ch_fasta = Channel.value(file(fasta, checkIfExists: true)) } @@ -119,7 +112,6 @@ workflow PREPARE_GENOME { if (filter_gtf_needed) { GTF_FILTER(ch_fasta, ch_gtf) ch_gtf = GTF_FILTER.out.genome_gtf.first() - ch_versions = ch_versions.mix(GTF_FILTER.out.versions) } //--------------------------------------------------- @@ -129,7 +121,6 @@ workflow PREPARE_GENOME { if (fasta_provided && additional_fasta) { if (additional_fasta.endsWith('.gz')) { ch_add_fasta = GUNZIP_ADDITIONAL_FASTA([ [:], file(additional_fasta, checkIfExists: true) ]).gunzip.map { it[1] } - ch_versions = ch_versions.mix(GUNZIP_ADDITIONAL_FASTA.out.versions) } else { ch_add_fasta = Channel.value(file(additional_fasta, checkIfExists: true)) } @@ -141,7 +132,6 @@ workflow PREPARE_GENOME { ) ch_fasta = CUSTOM_CATADDITIONALFASTA.out.fasta.map { it[1] }.first() ch_gtf = CUSTOM_CATADDITIONALFASTA.out.gtf.map { it[1] }.first() - ch_versions = ch_versions.mix(CUSTOM_CATADDITIONALFASTA.out.versions) } //------------------------------------------------------ @@ -151,13 +141,11 @@ workflow PREPARE_GENOME { if (gene_bed) { if (gene_bed.endsWith('.gz')) { ch_gene_bed = GUNZIP_GENE_BED ([ [:], file(gene_bed, checkIfExists: true) ]).gunzip.map { it[1] } - ch_versions = ch_versions.mix(GUNZIP_GENE_BED.out.versions) } else { ch_gene_bed = Channel.value(file(gene_bed, checkIfExists: true)) } } else { ch_gene_bed = GTF2BED(ch_gtf).bed - ch_versions = ch_versions.mix(GTF2BED.out.versions) } //---------------------------------------------------------------------- @@ -170,19 +158,16 @@ workflow PREPARE_GENOME { // Use user-provided transcript FASTA if (transcript_fasta.endsWith('.gz')) { ch_transcript_fasta = GUNZIP_TRANSCRIPT_FASTA ([ [:], file(transcript_fasta, checkIfExists: true) ]).gunzip.map { it[1] } - ch_versions = ch_versions.mix(GUNZIP_TRANSCRIPT_FASTA.out.versions) } else { ch_transcript_fasta = Channel.value(file(transcript_fasta, checkIfExists: true)) } if (gencode) { PREPROCESS_TRANSCRIPTS_FASTA_GENCODE(ch_transcript_fasta) ch_transcript_fasta = PREPROCESS_TRANSCRIPTS_FASTA_GENCODE.out.fasta - ch_versions = ch_versions.mix(PREPROCESS_TRANSCRIPTS_FASTA_GENCODE.out.versions) } } else if (fasta_provided) { // Build transcripts from genome if we have it ch_transcript_fasta = MAKE_TRANSCRIPTS_FASTA(ch_fasta, ch_gtf).transcript_fasta - ch_versions = ch_versions.mix(MAKE_TRANSCRIPTS_FASTA.out.versions) } //------------------------------------------------------- @@ -194,7 +179,6 @@ workflow PREPARE_GENOME { CUSTOM_GETCHROMSIZES(ch_fasta.map { [ [:], it ] }) ch_fai = 
CUSTOM_GETCHROMSIZES.out.fai.map { it[1] } ch_chrom_sizes = CUSTOM_GETCHROMSIZES.out.sizes.map { it[1] } - ch_versions = ch_versions.mix(CUSTOM_GETCHROMSIZES.out.versions) } //------------------------------------------------ @@ -215,7 +199,6 @@ workflow PREPARE_GENOME { // Use user-provided bbsplit index if (bbsplit_index.endsWith('.tar.gz')) { ch_bbsplit_index = UNTAR_BBSPLIT_INDEX ([ [:], file(bbsplit_index, checkIfExists: true) ]).untar.map { it[1] } - ch_versions = ch_versions.mix(UNTAR_BBSPLIT_INDEX.out.versions) } else { ch_bbsplit_index = Channel.value(file(bbsplit_index, checkIfExists: true)) } @@ -238,7 +221,6 @@ workflow PREPARE_GENOME { ch_bbsplit_fasta_list, true ).index - ch_versions = ch_versions.mix(BBMAP_BBSPLIT.out.versions) } // else: no FASTA and no user-provided index -> remains empty } @@ -257,7 +239,6 @@ workflow PREPARE_GENOME { if (sortmerna_index) { if (sortmerna_index.endsWith('.tar.gz')) { ch_sortmerna_index = UNTAR_SORTMERNA_INDEX ([ [:], file(sortmerna_index, checkIfExists: true) ]).untar.map { it[1] } - ch_versions = ch_versions.mix(UNTAR_SORTMERNA_INDEX.out.versions) } else { ch_sortmerna_index = Channel.value([ [:], file(sortmerna_index, checkIfExists: true) ]) } @@ -269,7 +250,6 @@ workflow PREPARE_GENOME { Channel.of([ [], [] ]) ) ch_sortmerna_index = SORTMERNA_INDEX.out.index.first() - ch_versions = ch_versions.mix(SORTMERNA_INDEX.out.versions) } } @@ -281,7 +261,6 @@ workflow PREPARE_GENOME { if (star_index) { if (star_index.endsWith('.tar.gz')) { ch_star_index = UNTAR_STAR_INDEX ([ [:], file(star_index, checkIfExists: true) ]).untar.map { it[1] } - ch_versions = ch_versions.mix(UNTAR_STAR_INDEX.out.versions) } else { ch_star_index = Channel.value(file(star_index, checkIfExists: true)) } @@ -295,13 +274,11 @@ workflow PREPARE_GENOME { } if (is_aws_igenome) { ch_star_index = STAR_GENOMEGENERATE_IGENOMES(ch_fasta, ch_gtf).index - ch_versions = ch_versions.mix(STAR_GENOMEGENERATE_IGENOMES.out.versions) } else { ch_star_index = STAR_GENOMEGENERATE( ch_fasta.map { [ [:], it ] }, ch_gtf.map { [ [:], it ] } ).index.map { it[1] } - ch_versions = ch_versions.mix(STAR_GENOMEGENERATE.out.versions) } } } @@ -314,14 +291,12 @@ workflow PREPARE_GENOME { if (rsem_index) { if (rsem_index.endsWith('.tar.gz')) { ch_rsem_index = UNTAR_RSEM_INDEX ([ [:], file(rsem_index, checkIfExists: true) ]).untar.map { it[1] } - ch_versions = ch_versions.mix(UNTAR_RSEM_INDEX.out.versions) } else { ch_rsem_index = Channel.value(file(rsem_index, checkIfExists: true)) } } else if (fasta_provided) { ch_rsem_index = RSEM_PREPAREREFERENCE_GENOME(ch_fasta, ch_gtf).index - ch_versions = ch_versions.mix(RSEM_PREPAREREFERENCE_GENOME.out.versions) } } @@ -337,13 +312,11 @@ workflow PREPARE_GENOME { } else if (fasta_provided) { ch_splicesites = HISAT2_EXTRACTSPLICESITES(ch_gtf.map { [ [:], it ] }).txt.map { it[1] } - ch_versions = ch_versions.mix(HISAT2_EXTRACTSPLICESITES.out.versions) } // the index if (hisat2_index) { if (hisat2_index.endsWith('.tar.gz')) { ch_hisat2_index = UNTAR_HISAT2_INDEX ([ [:], file(hisat2_index, checkIfExists: true) ]).untar.map { it[1] } - ch_versions = ch_versions.mix(UNTAR_HISAT2_INDEX.out.versions) } else { ch_hisat2_index = Channel.value(file(hisat2_index, checkIfExists: true)) } @@ -354,7 +327,6 @@ workflow PREPARE_GENOME { ch_gtf.map { [ [:], it ] }, ch_splicesites.map { [ [:], it ] } ).index.map { it[1] } - ch_versions = ch_versions.mix(HISAT2_BUILD.out.versions) } } @@ -366,7 +338,6 @@ workflow PREPARE_GENOME { if (salmon_index) { if 
(salmon_index.endsWith('.tar.gz')) { ch_salmon_index = UNTAR_SALMON_INDEX ( [ [:], salmon_index ] ).untar.map { it[1] } - ch_versions = ch_versions.mix(UNTAR_SALMON_INDEX.out.versions) } else { ch_salmon_index = Channel.value(file(salmon_index)) } @@ -374,12 +345,10 @@ workflow PREPARE_GENOME { if (ch_transcript_fasta && fasta_provided) { // build from transcript FASTA + genome FASTA ch_salmon_index = SALMON_INDEX(ch_fasta, ch_transcript_fasta).index - ch_versions = ch_versions.mix(SALMON_INDEX.out.versions) } else if (ch_transcript_fasta) { // some Salmon module can run with just a transcript FASTA ch_salmon_index = SALMON_INDEX([], ch_transcript_fasta).index - ch_versions = ch_versions.mix(SALMON_INDEX.out.versions) } } @@ -390,14 +359,12 @@ workflow PREPARE_GENOME { if (kallisto_index) { if (kallisto_index.endsWith('.tar.gz')) { ch_kallisto_index = UNTAR_KALLISTO_INDEX ( [ [:], kallisto_index ] ).untar - ch_versions = ch_versions.mix(UNTAR_KALLISTO_INDEX.out.versions) } else { ch_kallisto_index = Channel.value([[:], file(kallisto_index)]) } } else { if ('kallisto' in prepare_tool_indices) { ch_kallisto_index = KALLISTO_INDEX ( ch_transcript_fasta.map { [ [:], it] } ).index - ch_versions = ch_versions.mix(KALLISTO_INDEX.out.versions) } } @@ -407,6 +374,8 @@ workflow PREPARE_GENOME { emit: fasta = ch_fasta // channel: path(genome.fasta) gtf = ch_gtf // channel: path(genome.gtf) + gff = ch_gff + add_fasta = ch_add_fasta fai = ch_fai // channel: path(genome.fai) gene_bed = ch_gene_bed // channel: path(gene.bed) transcript_fasta = ch_transcript_fasta // channel: path(transcript.fasta) @@ -420,5 +389,4 @@ workflow PREPARE_GENOME { hisat2_index = ch_hisat2_index // channel: path(hisat2/index/) salmon_index = ch_salmon_index // channel: path(salmon/index/) kallisto_index = ch_kallisto_index // channel: [ meta, path(kallisto/index/) ] - versions = ch_versions.ifEmpty(null) // channel: [ versions.yml ] } diff --git a/subworkflows/local/prepare_genome/nextflow.config b/subworkflows/local/prepare_genome/nextflow.config index b06aaa617..921f16659 100644 --- a/subworkflows/local/prepare_genome/nextflow.config +++ b/subworkflows/local/prepare_genome/nextflow.config @@ -1,40 +1,11 @@ process { - withName: 'GUNZIP_.*|MAKE_TRANSCRIPTS_FASTA' { - publishDir = [ - path: { params.save_reference ? "${params.outdir}/genome" : params.outdir }, - mode: params.publish_dir_mode, - saveAs: { filename -> (filename != 'versions.yml' && params.save_reference) ? filename : null } - ] - } - withName: 'UNTAR_.*' { ext.args2 = '--no-same-owner' } - withName: 'UNTAR_.*|STAR_GENOMEGENERATE|STAR_GENOMEGENERATE_IGENOMES|HISAT2_BUILD' { - publishDir = [ - path: { params.save_reference ? "${params.outdir}/genome/index" : params.outdir }, - mode: params.publish_dir_mode, - saveAs: { filename -> (filename != 'versions.yml' && params.save_reference) ? filename : null } - ] - } - withName: 'GFFREAD' { ext.args = '--keep-exon-attrs -F -T' ext.prefix = { gff.simpleName } - publishDir = [ - path: { params.save_reference ? "${params.outdir}/genome" : params.outdir }, - mode: params.publish_dir_mode, - saveAs: { filename -> (filename != 'versions.yml' && params.save_reference) ? filename : null } - ] - } - - withName: 'HISAT2_EXTRACTSPLICESITES' { - publishDir = [ - path: { params.save_reference ? "${params.outdir}/genome/index" : params.outdir }, - mode: params.publish_dir_mode, - saveAs: { filename -> (filename != 'versions.yml' && params.save_reference) ? 
filename : null } - ] } withName: 'SALMON_INDEX' { @@ -42,87 +13,25 @@ process { params.gencode ? '--gencode' : '', params.pseudo_aligner_kmer_size ? "-k ${params.pseudo_aligner_kmer_size}": '' ].join(' ').trim() } - publishDir = [ - path: { params.save_reference ? "${params.outdir}/genome/index" : params.outdir }, - mode: params.publish_dir_mode, - saveAs: { filename -> (filename != 'versions.yml' && params.save_reference) ? filename : null } - ] } withName: 'KALLISTO_INDEX' { ext.args = params.pseudo_aligner_kmer_size ? "-k ${params.pseudo_aligner_kmer_size}" : '' - publishDir = [ - path: { params.save_reference ? "${params.outdir}/genome/index" : params.outdir }, - mode: params.publish_dir_mode, - saveAs: { filename -> (filename != 'versions.yml' && params.save_reference) ? filename : null } - ] } withName: 'RSEM_PREPAREREFERENCE_GENOME' { ext.args = '--star' - publishDir = [ - path: { params.save_reference ? "${params.outdir}/genome/index" : params.outdir }, - mode: params.publish_dir_mode, - saveAs: { filename -> (filename != 'versions.yml' && params.save_reference) ? filename : null } - ] - } - - withName: 'GTF2BED' { - publishDir = [ - path: { params.save_reference ? "${params.outdir}/genome" : params.outdir }, - mode: params.publish_dir_mode, - saveAs: { filename -> (filename != 'versions.yml' && params.save_reference) ? filename : null } - ] - } - - withName: 'CAT_ADDITIONAL_FASTA|PREPROCESS_TRANSCRIPTS_FASTA_GENCODE' { - publishDir = [ - path: { params.save_reference ? "${params.outdir}/genome" : params.outdir }, - mode: params.publish_dir_mode, - saveAs: { filename -> (filename != 'versions.yml' && params.save_reference) ? filename : null } - ] } withName: 'GTF_FILTER' { ext.args = { params.skip_gtf_transcript_filter ?: '--skip_transcript_id_check' } - publishDir = [ - path: { params.save_reference ? "${params.outdir}/genome" : params.outdir }, - mode: params.publish_dir_mode, - saveAs: { filename -> (filename != 'versions.yml' && params.save_reference) ? filename : null } - ] } - withName: 'CUSTOM_GETCHROMSIZES' { - publishDir = [ - path: { params.save_reference ? "${params.outdir}/genome" : params.outdir }, - mode: params.publish_dir_mode, - saveAs: { filename -> (filename != 'versions.yml' && params.save_reference) ? filename : null } - ] + withName: '.*:PREPARE_GENOME:BBMAP_BBSPLIT' { + ext.args = 'build=1' } -} - -if (!params.skip_bbsplit && params.bbsplit_fasta_list) { - process { - withName: '.*:PREPARE_GENOME:BBMAP_BBSPLIT' { - ext.args = 'build=1' - publishDir = [ - path: { params.save_reference ? "${params.outdir}/genome/index" : params.outdir }, - mode: params.publish_dir_mode, - saveAs: { filename -> (filename != 'versions.yml' && params.save_reference) ? filename : null } - ] - } - } -} -if (params.remove_ribo_rna && params.ribo_database_manifest) { - process { - withName: 'SORTMERNA_INDEX' { - ext.args = '--index 1' - publishDir = [ - path: { params.save_reference ? "${params.outdir}/genome/sortmerna" : params.outdir }, - mode: params.publish_dir_mode, - saveAs: { filename -> filename.equals('versions.yml') ? null : params.save_reference ? 
filename : null } - ] - } + withName: 'SORTMERNA_INDEX' { + ext.args = '--index 1' } } diff --git a/subworkflows/local/quantify_rsem/main.nf b/subworkflows/local/quantify_rsem/main.nf index 4b7473634..c0d576afa 100644 --- a/subworkflows/local/quantify_rsem/main.nf +++ b/subworkflows/local/quantify_rsem/main.nf @@ -14,19 +14,15 @@ workflow QUANTIFY_RSEM { main: - ch_versions = Channel.empty() - // // Quantify reads with RSEM // RSEM_CALCULATEEXPRESSION ( reads, index ) - ch_versions = ch_versions.mix(RSEM_CALCULATEEXPRESSION.out.versions.first()) // // Sort, index BAM file and run samtools stats, flagstat and idxstats // BAM_SORT_STATS_SAMTOOLS ( RSEM_CALCULATEEXPRESSION.out.bam_star, fasta ) - ch_versions = ch_versions.mix(BAM_SORT_STATS_SAMTOOLS.out.versions) // // Merge counts across samples @@ -35,7 +31,6 @@ workflow QUANTIFY_RSEM { RSEM_CALCULATEEXPRESSION.out.counts_gene.collect{it[1]}, // [meta, counts]: Collect the second element (counts files) in the channel across all samples RSEM_CALCULATEEXPRESSION.out.counts_transcript.collect{it[1]} ) - ch_versions = ch_versions.mix(RSEM_MERGE_COUNTS.out.versions) emit: counts_gene = RSEM_CALCULATEEXPRESSION.out.counts_gene // channel: [ val(meta), counts ] @@ -58,5 +53,4 @@ workflow QUANTIFY_RSEM { merged_counts_transcript = RSEM_MERGE_COUNTS.out.counts_transcript // path: *.transcript_counts.tsv merged_tpm_transcript = RSEM_MERGE_COUNTS.out.tpm_transcript // path: *.transcript_tpm.tsv - versions = ch_versions // channel: [ versions.yml ] } diff --git a/subworkflows/nf-core/bam_dedup_stats_samtools_umicollapse/main.nf b/subworkflows/nf-core/bam_dedup_stats_samtools_umicollapse/main.nf index 54c42b986..6bd876930 100644 --- a/subworkflows/nf-core/bam_dedup_stats_samtools_umicollapse/main.nf +++ b/subworkflows/nf-core/bam_dedup_stats_samtools_umicollapse/main.nf @@ -12,19 +12,15 @@ workflow BAM_DEDUP_STATS_SAMTOOLS_UMICOLLAPSE { main: - ch_versions = Channel.empty() - // // umicollapse in bam mode (thus hardcode mode input channel to 'bam') // UMICOLLAPSE ( ch_bam_bai, channel.value( 'bam' )) - ch_versions = ch_versions.mix(UMICOLLAPSE.out.versions.first()) // // Index BAM file and run samtools stats, flagstat and idxstats // SAMTOOLS_INDEX ( UMICOLLAPSE.out.bam ) - ch_versions = ch_versions.mix(SAMTOOLS_INDEX.out.versions.first()) ch_bam_bai_dedup = UMICOLLAPSE.out.bam .join(SAMTOOLS_INDEX.out.bai, by: [0], remainder: true) @@ -39,7 +35,6 @@ workflow BAM_DEDUP_STATS_SAMTOOLS_UMICOLLAPSE { } BAM_STATS_SAMTOOLS ( ch_bam_bai_dedup, [ [:], [] ] ) - ch_versions = ch_versions.mix(BAM_STATS_SAMTOOLS.out.versions) emit: bam = UMICOLLAPSE.out.bam // channel: [ val(meta), path(bam) ] @@ -51,5 +46,4 @@ workflow BAM_DEDUP_STATS_SAMTOOLS_UMICOLLAPSE { flagstat = BAM_STATS_SAMTOOLS.out.flagstat // channel: [ val(meta), path(flagstat) ] idxstats = BAM_STATS_SAMTOOLS.out.idxstats // channel: [ val(meta), path(idxstats) ] - versions = ch_versions // channel: [ path(versions.yml) ] } diff --git a/subworkflows/nf-core/bam_dedup_stats_samtools_umitools/main.nf b/subworkflows/nf-core/bam_dedup_stats_samtools_umitools/main.nf index fe6ff312f..39f63f1d6 100644 --- a/subworkflows/nf-core/bam_dedup_stats_samtools_umitools/main.nf +++ b/subworkflows/nf-core/bam_dedup_stats_samtools_umitools/main.nf @@ -13,19 +13,15 @@ workflow BAM_DEDUP_STATS_SAMTOOLS_UMITOOLS { main: - ch_versions = Channel.empty() - // // UMI-tools dedup // UMITOOLS_DEDUP ( ch_bam_bai, val_get_dedup_stats ) - ch_versions = ch_versions.mix(UMITOOLS_DEDUP.out.versions.first()) // // Index BAM file and 
run samtools stats, flagstat and idxstats // SAMTOOLS_INDEX ( UMITOOLS_DEDUP.out.bam ) - ch_versions = ch_versions.mix(SAMTOOLS_INDEX.out.versions.first()) ch_bam_bai_dedup = UMITOOLS_DEDUP.out.bam .join(SAMTOOLS_INDEX.out.bai, by: [0], remainder: true) @@ -40,7 +36,6 @@ workflow BAM_DEDUP_STATS_SAMTOOLS_UMITOOLS { } BAM_STATS_SAMTOOLS ( ch_bam_bai_dedup, [ [:], [] ] ) - ch_versions = ch_versions.mix(BAM_STATS_SAMTOOLS.out.versions) emit: bam = UMITOOLS_DEDUP.out.bam // channel: [ val(meta), path(bam) ] @@ -52,5 +47,4 @@ workflow BAM_DEDUP_STATS_SAMTOOLS_UMITOOLS { flagstat = BAM_STATS_SAMTOOLS.out.flagstat // channel: [ val(meta), path(flagstat) ] idxstats = BAM_STATS_SAMTOOLS.out.idxstats // channel: [ val(meta), path(idxstats) ] - versions = ch_versions // channel: [ path(versions.yml) ] } diff --git a/subworkflows/nf-core/bam_dedup_umi/main.nf b/subworkflows/nf-core/bam_dedup_umi/main.nf index d0a1ddb13..c4353422b 100644 --- a/subworkflows/nf-core/bam_dedup_umi/main.nf +++ b/subworkflows/nf-core/bam_dedup_umi/main.nf @@ -22,7 +22,6 @@ workflow BAM_DEDUP_UMI { ch_transcript_fasta // channel: [ val(meta), path(fasta) ] main: - ch_versions = Channel.empty() if (umi_dedup_tool != "umicollapse" && umi_dedup_tool != "umitools"){ error("Unknown umi_dedup_tool '${umi_dedup_tool}'") @@ -113,12 +112,6 @@ workflow BAM_DEDUP_UMI { .transpose() .map{it[1]} - // Record versions - - ch_versions = UMI_DEDUP_GENOME.out.versions - .mix(BAM_SORT_STATS_SAMTOOLS.out.versions) - .mix(UMITOOLS_PREPAREFORRSEM.out.versions) - emit: bam = UMI_DEDUP_GENOME.out.bam // channel: [ val(meta), path(bam) ] bai = bam_csi_index ? UMI_DEDUP_GENOME.out.csi : UMI_DEDUP_GENOME.out.bai // channel: [ val(meta), path(bai) ] @@ -128,5 +121,4 @@ workflow BAM_DEDUP_UMI { idxstats = UMI_DEDUP_GENOME.out.idxstats.mix(UMI_DEDUP_TRANSCRIPTOME.out.idxstats) // channel: [ val(meta), path(idxstats)] multiqc_files = ch_multiqc_files // channel: file transcriptome_bam = ch_dedup_transcriptome_bam // channel: [ val(meta), path(bam) ] - versions = ch_versions // channel: [ path(versions.yml) ] } diff --git a/subworkflows/nf-core/bam_markduplicates_picard/main.nf b/subworkflows/nf-core/bam_markduplicates_picard/main.nf index 4ea7416cc..3fa55167d 100644 --- a/subworkflows/nf-core/bam_markduplicates_picard/main.nf +++ b/subworkflows/nf-core/bam_markduplicates_picard/main.nf @@ -15,15 +15,11 @@ workflow BAM_MARKDUPLICATES_PICARD { main: - ch_versions = Channel.empty() - PICARD_MARKDUPLICATES ( ch_reads, ch_fasta, ch_fai ) - ch_versions = ch_versions.mix(PICARD_MARKDUPLICATES.out.versions.first()) ch_markdup = PICARD_MARKDUPLICATES.out.bam.mix(PICARD_MARKDUPLICATES.out.cram) SAMTOOLS_INDEX ( ch_markdup ) - ch_versions = ch_versions.mix(SAMTOOLS_INDEX.out.versions.first()) ch_reads_index = ch_markdup .join(SAMTOOLS_INDEX.out.bai, by: [0], remainder: true) @@ -36,7 +32,6 @@ workflow BAM_MARKDUPLICATES_PICARD { } BAM_STATS_SAMTOOLS ( ch_reads_index, ch_fasta ) - ch_versions = ch_versions.mix(BAM_STATS_SAMTOOLS.out.versions) emit: bam = PICARD_MARKDUPLICATES.out.bam // channel: [ val(meta), path(bam) ] @@ -50,5 +45,4 @@ workflow BAM_MARKDUPLICATES_PICARD { flagstat = BAM_STATS_SAMTOOLS.out.flagstat // channel: [ val(meta), path(flagstat) ] idxstats = BAM_STATS_SAMTOOLS.out.idxstats // channel: [ val(meta), path(idxstats) ] - versions = ch_versions // channel: [ versions.yml ] } diff --git a/subworkflows/nf-core/bam_rseqc/main.nf b/subworkflows/nf-core/bam_rseqc/main.nf index 043321a13..8c3a21b52 100644 --- a/subworkflows/nf-core/bam_rseqc/main.nf 
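Every subworkflow touched by this patch is trimmed the same way as the two UMI-dedup hunks above: the local ch_versions collector, the .mix(*.out.versions) calls and the versions emit all disappear, since each module now reports over the shared topic. A stripped-down illustration of what such a subworkflow reduces to; the SORT_EXAMPLE process and its samtools call are placeholders, not pipeline code:

    process SORT_EXAMPLE {
        input:
        tuple val(meta), path(bam)

        output:
        tuple val(meta), path('*.sorted.bam'), emit: bam
        // version reporting stays inside the module, on the shared topic
        tuple val("${task.process}"), val('samtools'), eval('samtools --version | head -n1'), topic: versions

        script:
        """
        samtools sort ${bam} -o ${bam.baseName}.sorted.bam
        """
    }

    workflow BAM_SORT_EXAMPLE {
        take:
        ch_bam   // channel: [ val(meta), path(bam) ]

        main:
        // no ch_versions = Channel.empty(), no mixing, no versions emit
        SORT_EXAMPLE ( ch_bam )

        emit:
        bam = SORT_EXAMPLE.out.bam   // channel: [ val(meta), path(bam) ]
    }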
+++ b/subworkflows/nf-core/bam_rseqc/main.nf @@ -21,8 +21,6 @@ workflow BAM_RSEQC { bam = bam_bai.map{ [ it[0], it[1] ] } - versions = Channel.empty() - // // Run RSeQC bam_stat.py // @@ -31,7 +29,6 @@ workflow BAM_RSEQC { if ('bam_stat' in rseqc_modules) { RSEQC_BAMSTAT(bam) bamstat_txt = RSEQC_BAMSTAT.out.txt - versions = versions.mix(RSEQC_BAMSTAT.out.versions.first()) } // @@ -52,7 +49,6 @@ workflow BAM_RSEQC { innerdistance_pdf = RSEQC_INNERDISTANCE.out.pdf innerdistance_rscript = RSEQC_INNERDISTANCE.out.rscript innerdistance_all = innerdistance_distance.mix(innerdistance_freq, innerdistance_mean, innerdistance_pdf, innerdistance_rscript) - versions = versions.mix(RSEQC_INNERDISTANCE.out.versions.first()) } // @@ -62,7 +58,6 @@ workflow BAM_RSEQC { if ('infer_experiment' in rseqc_modules) { RSEQC_INFEREXPERIMENT(bam, bed) inferexperiment_txt = RSEQC_INFEREXPERIMENT.out.txt - versions = versions.mix(RSEQC_INFEREXPERIMENT.out.versions.first()) } // @@ -87,7 +82,6 @@ workflow BAM_RSEQC { junctionannotation_rscript = RSEQC_JUNCTIONANNOTATION.out.rscript junctionannotation_log = RSEQC_JUNCTIONANNOTATION.out.log junctionannotation_all = junctionannotation_bed.mix(junctionannotation_interact_bed, junctionannotation_xls, junctionannotation_pdf, junctionannotation_events_pdf, junctionannotation_rscript, junctionannotation_log) - versions = versions.mix(RSEQC_JUNCTIONANNOTATION.out.versions.first()) } // @@ -102,7 +96,6 @@ workflow BAM_RSEQC { junctionsaturation_pdf = RSEQC_JUNCTIONSATURATION.out.pdf junctionsaturation_rscript = RSEQC_JUNCTIONSATURATION.out.rscript junctionsaturation_all = junctionsaturation_pdf.mix(junctionsaturation_rscript) - versions = versions.mix(RSEQC_JUNCTIONSATURATION.out.versions.first()) } // @@ -113,7 +106,6 @@ workflow BAM_RSEQC { if ('read_distribution' in rseqc_modules) { RSEQC_READDISTRIBUTION(bam, bed) readdistribution_txt = RSEQC_READDISTRIBUTION.out.txt - versions = versions.mix(RSEQC_READDISTRIBUTION.out.versions.first()) } // @@ -132,7 +124,6 @@ workflow BAM_RSEQC { readduplication_pdf = RSEQC_READDUPLICATION.out.pdf readduplication_rscript = RSEQC_READDUPLICATION.out.rscript readduplication_all = readduplication_seq_xls.mix(readduplication_pos_xls, readduplication_pdf, readduplication_rscript) - versions = versions.mix(RSEQC_READDUPLICATION.out.versions.first()) } // @@ -143,7 +134,6 @@ workflow BAM_RSEQC { if ('tin' in rseqc_modules) { RSEQC_TIN(bam_bai, bed) tin_txt = RSEQC_TIN.out.txt - versions = versions.mix(RSEQC_TIN.out.versions.first()) } emit: @@ -181,5 +171,4 @@ workflow BAM_RSEQC { tin_txt // channel: [ val(meta), txt ] - versions // channel: [ versions.yml ] } diff --git a/subworkflows/nf-core/bam_sort_stats_samtools/main.nf b/subworkflows/nf-core/bam_sort_stats_samtools/main.nf index b716375b0..3fb5fabba 100644 --- a/subworkflows/nf-core/bam_sort_stats_samtools/main.nf +++ b/subworkflows/nf-core/bam_sort_stats_samtools/main.nf @@ -13,13 +13,9 @@ workflow BAM_SORT_STATS_SAMTOOLS { main: - ch_versions = Channel.empty() - SAMTOOLS_SORT ( ch_bam, ch_fasta ) - ch_versions = ch_versions.mix(SAMTOOLS_SORT.out.versions.first()) SAMTOOLS_INDEX ( SAMTOOLS_SORT.out.bam ) - ch_versions = ch_versions.mix(SAMTOOLS_INDEX.out.versions.first()) SAMTOOLS_SORT.out.bam .join(SAMTOOLS_INDEX.out.bai, by: [0], remainder: true) @@ -35,7 +31,6 @@ workflow BAM_SORT_STATS_SAMTOOLS { .set { ch_bam_bai } BAM_STATS_SAMTOOLS ( ch_bam_bai, ch_fasta ) - ch_versions = ch_versions.mix(BAM_STATS_SAMTOOLS.out.versions) emit: bam = SAMTOOLS_SORT.out.bam // channel: [ val(meta), [ 
bam ] ] @@ -46,5 +41,4 @@ workflow BAM_SORT_STATS_SAMTOOLS { flagstat = BAM_STATS_SAMTOOLS.out.flagstat // channel: [ val(meta), [ flagstat ] ] idxstats = BAM_STATS_SAMTOOLS.out.idxstats // channel: [ val(meta), [ idxstats ] ] - versions = ch_versions // channel: [ versions.yml ] } diff --git a/subworkflows/nf-core/bam_stats_samtools/main.nf b/subworkflows/nf-core/bam_stats_samtools/main.nf index 44d4c010a..c5855655f 100644 --- a/subworkflows/nf-core/bam_stats_samtools/main.nf +++ b/subworkflows/nf-core/bam_stats_samtools/main.nf @@ -12,21 +12,16 @@ workflow BAM_STATS_SAMTOOLS { ch_fasta // channel: [ val(meta), path(fasta) ] main: - ch_versions = Channel.empty() SAMTOOLS_STATS ( ch_bam_bai, ch_fasta ) - ch_versions = ch_versions.mix(SAMTOOLS_STATS.out.versions) SAMTOOLS_FLAGSTAT ( ch_bam_bai ) - ch_versions = ch_versions.mix(SAMTOOLS_FLAGSTAT.out.versions) SAMTOOLS_IDXSTATS ( ch_bam_bai ) - ch_versions = ch_versions.mix(SAMTOOLS_IDXSTATS.out.versions) emit: stats = SAMTOOLS_STATS.out.stats // channel: [ val(meta), path(stats) ] flagstat = SAMTOOLS_FLAGSTAT.out.flagstat // channel: [ val(meta), path(flagstat) ] idxstats = SAMTOOLS_IDXSTATS.out.idxstats // channel: [ val(meta), path(idxstats) ] - versions = ch_versions // channel: [ path(versions.yml) ] } diff --git a/subworkflows/nf-core/bedgraph_bedclip_bedgraphtobigwig/main.nf b/subworkflows/nf-core/bedgraph_bedclip_bedgraphtobigwig/main.nf index 6c3b7b0cb..52ee83604 100644 --- a/subworkflows/nf-core/bedgraph_bedclip_bedgraphtobigwig/main.nf +++ b/subworkflows/nf-core/bedgraph_bedclip_bedgraphtobigwig/main.nf @@ -12,23 +12,18 @@ workflow BEDGRAPH_BEDCLIP_BEDGRAPHTOBIGWIG { main: - ch_versions = Channel.empty() - // // Clip bedGraph file // UCSC_BEDCLIP ( bedgraph, sizes ) - ch_versions = ch_versions.mix(UCSC_BEDCLIP.out.versions.first()) // // Convert bedGraph to bigWig // UCSC_BEDGRAPHTOBIGWIG ( UCSC_BEDCLIP.out.bedgraph, sizes ) - ch_versions = ch_versions.mix(UCSC_BEDGRAPHTOBIGWIG.out.versions.first()) emit: bigwig = UCSC_BEDGRAPHTOBIGWIG.out.bigwig // channel: [ val(meta), [ bigwig ] ] bedgraph = UCSC_BEDCLIP.out.bedgraph // channel: [ val(meta), [ bedgraph ] ] - versions = ch_versions // channel: [ versions.yml ] } diff --git a/subworkflows/nf-core/fastq_align_hisat2/main.nf b/subworkflows/nf-core/fastq_align_hisat2/main.nf index 511fe035c..ddabffe88 100644 --- a/subworkflows/nf-core/fastq_align_hisat2/main.nf +++ b/subworkflows/nf-core/fastq_align_hisat2/main.nf @@ -11,21 +11,15 @@ workflow FASTQ_ALIGN_HISAT2 { main: - ch_versions = Channel.empty() - - // // Map reads with HISAT2 // HISAT2_ALIGN ( reads, index, splicesites ) - ch_versions = ch_versions.mix(HISAT2_ALIGN.out.versions.first()) // // Sort, index BAM file and run samtools stats, flagstat and idxstats // BAM_SORT_STATS_SAMTOOLS ( HISAT2_ALIGN.out.bam, ch_fasta ) - ch_versions = ch_versions.mix(BAM_SORT_STATS_SAMTOOLS.out.versions) - emit: orig_bam = HISAT2_ALIGN.out.bam // channel: [ val(meta), bam ] @@ -39,5 +33,4 @@ workflow FASTQ_ALIGN_HISAT2 { flagstat = BAM_SORT_STATS_SAMTOOLS.out.flagstat // channel: [ val(meta), [ flagstat ] ] idxstats = BAM_SORT_STATS_SAMTOOLS.out.idxstats // channel: [ val(meta), [ idxstats ] ] - versions = ch_versions // channel: [ versions.yml ] } diff --git a/subworkflows/nf-core/fastq_qc_trim_filter_setstrandedness/main.nf b/subworkflows/nf-core/fastq_qc_trim_filter_setstrandedness/main.nf index a418a1a24..110120ab5 100644 --- a/subworkflows/nf-core/fastq_qc_trim_filter_setstrandedness/main.nf +++ 
b/subworkflows/nf-core/fastq_qc_trim_filter_setstrandedness/main.nf @@ -112,7 +112,6 @@ workflow FASTQ_QC_TRIM_FILTER_SETSTRANDEDNESS { main: - ch_versions = Channel.empty() ch_filtered_reads = Channel.empty() ch_trim_read_count = Channel.empty() ch_multiqc_files = Channel.empty() @@ -134,8 +133,6 @@ workflow FASTQ_QC_TRIM_FILTER_SETSTRANDEDNESS { ch_fastq.multiple ).reads.mix(ch_fastq.single).set { ch_filtered_reads } - ch_versions = ch_versions.mix(CAT_FASTQ.out.versions.first()) - // // MODULE: Lint FastQ files // @@ -144,7 +141,6 @@ workflow FASTQ_QC_TRIM_FILTER_SETSTRANDEDNESS { FQ_LINT( ch_filtered_reads ) - ch_versions = ch_versions.mix(FQ_LINT.out.versions.first()) ch_lint_log = ch_lint_log.mix(FQ_LINT.out.lint) ch_filtered_reads = ch_filtered_reads.join(FQ_LINT.out.lint.map { it[0] }) } @@ -165,7 +161,6 @@ workflow FASTQ_QC_TRIM_FILTER_SETSTRANDEDNESS { ch_filtered_reads = FASTQ_FASTQC_UMITOOLS_TRIMGALORE.out.reads ch_trim_read_count = FASTQ_FASTQC_UMITOOLS_TRIMGALORE.out.trim_read_count - ch_versions = ch_versions.mix(FASTQ_FASTQC_UMITOOLS_TRIMGALORE.out.versions) ch_multiqc_files = FASTQ_FASTQC_UMITOOLS_TRIMGALORE.out.fastqc_zip .mix(FASTQ_FASTQC_UMITOOLS_TRIMGALORE.out.trim_zip) .mix(FASTQ_FASTQC_UMITOOLS_TRIMGALORE.out.trim_log) @@ -191,7 +186,6 @@ workflow FASTQ_QC_TRIM_FILTER_SETSTRANDEDNESS { ch_filtered_reads = FASTQ_FASTQC_UMITOOLS_FASTP.out.reads ch_trim_read_count = FASTQ_FASTQC_UMITOOLS_FASTP.out.trim_read_count - ch_versions = ch_versions.mix(FASTQ_FASTQC_UMITOOLS_FASTP.out.versions) ch_multiqc_files = FASTQ_FASTQC_UMITOOLS_FASTP.out.fastqc_raw_zip .mix(FASTQ_FASTQC_UMITOOLS_FASTP.out.fastqc_trim_zip) .mix(FASTQ_FASTQC_UMITOOLS_FASTP.out.trim_json) @@ -245,8 +239,6 @@ workflow FASTQ_QC_TRIM_FILTER_SETSTRANDEDNESS { BBMAP_BBSPLIT.out.primary_fastq.set { ch_filtered_reads } - ch_versions = ch_versions.mix(BBMAP_BBSPLIT.out.versions.first()) - if (!skip_linting) { FQ_LINT_AFTER_BBSPLIT( ch_filtered_reads @@ -283,8 +275,6 @@ workflow FASTQ_QC_TRIM_FILTER_SETSTRANDEDNESS { ch_multiqc_files = ch_multiqc_files.mix(SORTMERNA.out.log) - ch_versions = ch_versions.mix(SORTMERNA.out.versions.first()) - if (!skip_linting) { FQ_LINT_AFTER_SORTMERNA( ch_filtered_reads @@ -323,7 +313,6 @@ workflow FASTQ_QC_TRIM_FILTER_SETSTRANDEDNESS { ch_salmon_index, make_salmon_index, ) - ch_versions = ch_versions.mix(FASTQ_SUBSAMPLE_FQ_SALMON.out.versions) FASTQ_SUBSAMPLE_FQ_SALMON.out.lib_format_counts .join(ch_strand_fastq.auto_strand) @@ -343,5 +332,4 @@ workflow FASTQ_QC_TRIM_FILTER_SETSTRANDEDNESS { reads = ch_strand_inferred_fastq trim_read_count = ch_trim_read_count multiqc_files = ch_multiqc_files.transpose().map { it[1] } - versions = ch_versions // channel: [ versions.yml ] } diff --git a/subworkflows/nf-core/quantify_pseudo_alignment/main.nf b/subworkflows/nf-core/quantify_pseudo_alignment/main.nf index ba8e91ac0..290d30daf 100644 --- a/subworkflows/nf-core/quantify_pseudo_alignment/main.nf +++ b/subworkflows/nf-core/quantify_pseudo_alignment/main.nf @@ -26,7 +26,6 @@ workflow QUANTIFY_PSEUDO_ALIGNMENT { kallisto_quant_fraglen_sd // val: Estimated standard error for fragment length required by Kallisto in single-end mode main: - ch_versions = Channel.empty() // // Quantify and merge counts across samples @@ -43,7 +42,6 @@ workflow QUANTIFY_PSEUDO_ALIGNMENT { ) ch_pseudo_results = SALMON_QUANT.out.results ch_pseudo_multiqc = ch_pseudo_results - ch_versions = ch_versions.mix(SALMON_QUANT.out.versions.first()) } else { KALLISTO_QUANT ( reads, @@ -55,7 +53,6 @@ workflow 
QUANTIFY_PSEUDO_ALIGNMENT { ) ch_pseudo_results = KALLISTO_QUANT.out.results ch_pseudo_multiqc = KALLISTO_QUANT.out.log - ch_versions = ch_versions.mix(KALLISTO_QUANT.out.versions.first()) } CUSTOM_TX2GENE ( @@ -65,14 +62,12 @@ workflow QUANTIFY_PSEUDO_ALIGNMENT { gtf_id_attribute, gtf_extra_attribute ) - ch_versions = ch_versions.mix(CUSTOM_TX2GENE.out.versions) TXIMETA_TXIMPORT ( ch_pseudo_results.collect{ it[1] }.map { [ ['id': 'all_samples'], it ] }, CUSTOM_TX2GENE.out.tx2gene, pseudo_aligner ) - ch_versions = ch_versions.mix(TXIMETA_TXIMPORT.out.versions) ch_gene_unified = TXIMETA_TXIMPORT.out.counts_gene .join(TXIMETA_TXIMPORT.out.counts_gene_length_scaled) @@ -86,7 +81,6 @@ workflow QUANTIFY_PSEUDO_ALIGNMENT { CUSTOM_TX2GENE.out.tx2gene, samplesheet ) - ch_versions = ch_versions.mix(SE_GENE_UNIFIED.out.versions) ch_transcript_unified = TXIMETA_TXIMPORT.out.counts_transcript .join(TXIMETA_TXIMPORT.out.lengths_transcript) @@ -98,7 +92,6 @@ workflow QUANTIFY_PSEUDO_ALIGNMENT { CUSTOM_TX2GENE.out.tx2gene, samplesheet ) - ch_versions = ch_versions.mix(SE_TRANSCRIPT_UNIFIED.out.versions) emit: results = ch_pseudo_results // channel: [ val(meta), results_dir ] @@ -116,5 +109,4 @@ workflow QUANTIFY_PSEUDO_ALIGNMENT { merged_gene_rds_unified = SE_GENE_UNIFIED.out.rds // path: *.rds merged_transcript_rds_unified = SE_TRANSCRIPT_UNIFIED.out.rds // path: *.rds - versions = ch_versions // channel: [ versions.yml ] } diff --git a/subworkflows/nf-core/quantify_pseudo_alignment/nextflow.config b/subworkflows/nf-core/quantify_pseudo_alignment/nextflow.config index b82edff51..00796acf4 100644 --- a/subworkflows/nf-core/quantify_pseudo_alignment/nextflow.config +++ b/subworkflows/nf-core/quantify_pseudo_alignment/nextflow.config @@ -2,11 +2,6 @@ if (!params.skip_pseudo_alignment && params.pseudo_aligner == 'salmon') { process { withName: '.*:QUANTIFY_PSEUDO_ALIGNMENT:SALMON_QUANT' { ext.args = { params.extra_salmon_quant_args ?: '' } - publishDir = [ - path: { "${params.outdir}/${params.pseudo_aligner}" }, - mode: params.publish_dir_mode, - saveAs: { filename -> filename.equals('versions.yml') || filename.endsWith('_meta_info.json') || filename.endsWith('_format_counts.json') ? null : filename } - ] } } } @@ -15,41 +10,16 @@ if (!params.skip_pseudo_alignment && params.pseudo_aligner == 'kallisto') { process { withName: '.*:QUANTIFY_PSEUDO_ALIGNMENT:KALLISTO_QUANT' { ext.args = params.extra_kallisto_quant_args ?: '' - publishDir = [ - path: { "${params.outdir}/${params.pseudo_aligner}" }, - mode: params.publish_dir_mode, - saveAs: { filename -> filename.equals('versions.yml') || filename.endsWith('.run_info.json') || filename.endsWith('.log') ? null : filename } - ] } } } if (!params.skip_pseudo_alignment && params.pseudo_aligner) { process { - withName: '.*:QUANTIFY_PSEUDO_ALIGNMENT:CUSTOM_TX2GENE' { - publishDir = [ - path: { "${params.outdir}/${params.pseudo_aligner}" }, - mode: params.publish_dir_mode, - saveAs: { filename -> filename.equals('versions.yml') ? null : filename } - ] - } - withName: '.*:QUANTIFY_PSEUDO_ALIGNMENT:TXIMETA_TXIMPORT' { ext.prefix = { "${quant_type}.merged" } - publishDir = [ - path: { "${params.outdir}/${params.pseudo_aligner}" }, - mode: params.publish_dir_mode, - saveAs: { filename -> filename.equals('versions.yml') ? 
null : filename } - ] } - withName: '.*:QUANTIFY_PSEUDO_ALIGNMENT:SE_.*' { - publishDir = [ - path: { "${params.outdir}/${params.pseudo_aligner}" }, - mode: params.publish_dir_mode, - saveAs: { filename -> filename.equals('versions.yml') || filename.endsWith('.log') ? null : filename } - ] - } withName: '.*:QUANTIFY_PSEUDO_ALIGNMENT:SE_GENE_UNIFIED' { ext.prefix = { "${meta.id}_gene" } ext.args = '--assay_names counts,counts_length_scaled,counts_scaled,lengths,tpm' diff --git a/workflows/rnaseq/main.nf b/workflows/rnaseq/main.nf index 9aa333841..25df36093 100755 --- a/workflows/rnaseq/main.nf +++ b/workflows/rnaseq/main.nf @@ -69,18 +69,10 @@ include { FASTQ_QC_TRIM_FILTER_SETSTRANDEDNESS } from '../../subwor ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -// Header files for MultiQC -ch_pca_header_multiqc = file("$projectDir/workflows/rnaseq/assets/multiqc/deseq2_pca_header.txt", checkIfExists: true) -sample_status_header_multiqc = file("$projectDir/workflows/rnaseq/assets/multiqc/sample_status_header.txt", checkIfExists: true) -ch_clustering_header_multiqc = file("$projectDir/workflows/rnaseq/assets/multiqc/deseq2_clustering_header.txt", checkIfExists: true) -ch_biotypes_header_multiqc = file("$projectDir/workflows/rnaseq/assets/multiqc/biotypes_header.txt", checkIfExists: true) -ch_dummy_file = ch_pca_header_multiqc - workflow RNASEQ { take: ch_samplesheet // channel: path(sample_sheet.csv) - ch_versions // channel: [ path(versions.yml) ] ch_fasta // channel: path(genome.fasta) ch_gtf // channel: path(genome.gtf) ch_fai // channel: path(genome.fai) @@ -99,6 +91,13 @@ workflow RNASEQ { main: + // Header files for MultiQC + ch_pca_header_multiqc = file("$projectDir/workflows/rnaseq/assets/multiqc/deseq2_pca_header.txt", checkIfExists: true) + sample_status_header_multiqc = file("$projectDir/workflows/rnaseq/assets/multiqc/sample_status_header.txt", checkIfExists: true) + ch_clustering_header_multiqc = file("$projectDir/workflows/rnaseq/assets/multiqc/deseq2_clustering_header.txt", checkIfExists: true) + ch_biotypes_header_multiqc = file("$projectDir/workflows/rnaseq/assets/multiqc/biotypes_header.txt", checkIfExists: true) + ch_dummy_file = ch_pca_header_multiqc + ch_multiqc_files = Channel.empty() ch_trim_status = Channel.empty() ch_map_status = Channel.empty() @@ -159,7 +158,6 @@ workflow RNASEQ { ) ch_multiqc_files = ch_multiqc_files.mix(FASTQ_QC_TRIM_FILTER_SETSTRANDEDNESS.out.multiqc_files) - ch_versions = ch_versions.mix(FASTQ_QC_TRIM_FILTER_SETSTRANDEDNESS.out.versions) ch_strand_inferred_filtered_fastq = FASTQ_QC_TRIM_FILTER_SETSTRANDEDNESS.out.reads ch_trim_read_count = FASTQ_QC_TRIM_FILTER_SETSTRANDEDNESS.out.trim_read_count @@ -207,7 +205,6 @@ workflow RNASEQ { if (params.bam_csi_index) { ch_genome_bam_index = ALIGN_STAR.out.csi } - ch_versions = ch_versions.mix(ALIGN_STAR.out.versions) // // SUBWORKFLOW: Remove duplicate reads from BAM file based on UMIs @@ -227,7 +224,6 @@ workflow RNASEQ { ch_genome_bam = BAM_DEDUP_UMI_STAR.out.bam ch_transcriptome_bam = BAM_DEDUP_UMI_STAR.out.transcriptome_bam ch_genome_bam_index = BAM_DEDUP_UMI_STAR.out.bai - ch_versions = ch_versions.mix(BAM_DEDUP_UMI_STAR.out.versions) ch_multiqc_files = ch_multiqc_files .mix(BAM_DEDUP_UMI_STAR.out.multiqc_files) @@ -259,7 +255,6 @@ workflow RNASEQ { params.kallisto_quant_fraglen, params.kallisto_quant_fraglen_sd ) - ch_versions = ch_versions.mix(QUANTIFY_STAR_SALMON.out.versions) if (!params.skip_qc & !params.skip_deseq2_qc) { DESEQ2_QC_STAR_SALMON ( @@ 
-269,7 +264,6 @@ workflow RNASEQ { ) ch_multiqc_files = ch_multiqc_files.mix(DESEQ2_QC_STAR_SALMON.out.pca_multiqc.collect()) ch_multiqc_files = ch_multiqc_files.mix(DESEQ2_QC_STAR_SALMON.out.dists_multiqc.collect()) - ch_versions = ch_versions.mix(DESEQ2_QC_STAR_SALMON.out.versions) } } @@ -294,7 +288,6 @@ workflow RNASEQ { if (params.bam_csi_index) { ch_genome_bam_index = QUANTIFY_RSEM.out.csi } - ch_versions = ch_versions.mix(QUANTIFY_RSEM.out.versions) if (!params.skip_qc & !params.skip_deseq2_qc) { DESEQ2_QC_RSEM ( @@ -304,7 +297,6 @@ workflow RNASEQ { ) ch_multiqc_files = ch_multiqc_files.mix(DESEQ2_QC_RSEM.out.pca_multiqc.collect()) ch_multiqc_files = ch_multiqc_files.mix(DESEQ2_QC_RSEM.out.dists_multiqc.collect()) - ch_versions = ch_versions.mix(DESEQ2_QC_RSEM.out.versions) } } @@ -326,7 +318,6 @@ workflow RNASEQ { if (params.bam_csi_index) { ch_genome_bam_index = FASTQ_ALIGN_HISAT2.out.csi } - ch_versions = ch_versions.mix(FASTQ_ALIGN_HISAT2.out.versions) // // SUBWORKFLOW: Remove duplicate reads from BAM file based on UMIs @@ -346,7 +337,6 @@ workflow RNASEQ { ch_genome_bam = BAM_DEDUP_UMI_HISAT2.out.bam ch_genome_bam_index = BAM_DEDUP_UMI_HISAT2.out.bai - ch_versions = ch_versions.mix(BAM_DEDUP_UMI_HISAT2.out.versions) ch_multiqc_files = ch_multiqc_files .mix(BAM_DEDUP_UMI_HISAT2.out.multiqc_files) @@ -415,7 +405,6 @@ workflow RNASEQ { ch_genome_bam ) ch_multiqc_files = ch_multiqc_files.mix(PRESEQ_LCEXTRAP.out.lc_extrap.collect{it[1]}) - ch_versions = ch_versions.mix(PRESEQ_LCEXTRAP.out.versions.first()) } // @@ -437,7 +426,6 @@ workflow RNASEQ { if (params.bam_csi_index) { ch_genome_bam_index = BAM_MARKDUPLICATES_PICARD.out.csi } - ch_versions = ch_versions.mix(BAM_MARKDUPLICATES_PICARD.out.versions) } // @@ -448,7 +436,6 @@ workflow RNASEQ { ch_genome_bam, ch_gtf ) - ch_versions = ch_versions.mix(STRINGTIE_STRINGTIE.out.versions.first()) } // @@ -472,14 +459,12 @@ workflow RNASEQ { SUBREAD_FEATURECOUNTS ( ch_featurecounts ) - ch_versions = ch_versions.mix(SUBREAD_FEATURECOUNTS.out.versions.first()) MULTIQC_CUSTOM_BIOTYPE ( SUBREAD_FEATURECOUNTS.out.counts, ch_biotypes_header_multiqc ) ch_multiqc_files = ch_multiqc_files.mix(MULTIQC_CUSTOM_BIOTYPE.out.tsv.collect{it[1]}) - ch_versions = ch_versions.mix(MULTIQC_CUSTOM_BIOTYPE.out.versions.first()) } // @@ -502,8 +487,6 @@ workflow RNASEQ { true ) - ch_versions = ch_versions.mix(BEDTOOLS_GENOMECOV_FW.out.versions.first()) - // // SUBWORKFLOW: Convert bedGraph to bigWig // @@ -511,7 +494,6 @@ workflow RNASEQ { BEDTOOLS_GENOMECOV_FW.out.genomecov, ch_chrom_sizes ) - ch_versions = ch_versions.mix(BEDGRAPH_BEDCLIP_BEDGRAPHTOBIGWIG_FORWARD.out.versions) BEDGRAPH_BEDCLIP_BEDGRAPHTOBIGWIG_REVERSE ( BEDTOOLS_GENOMECOV_REV.out.genomecov, @@ -529,7 +511,6 @@ workflow RNASEQ { ch_gtf.map { [ [:], it ] } ) ch_multiqc_files = ch_multiqc_files.mix(QUALIMAP_RNASEQ.out.results.collect{it[1]}) - ch_versions = ch_versions.mix(QUALIMAP_RNASEQ.out.versions.first()) } if (!params.skip_dupradar) { @@ -538,13 +519,12 @@ workflow RNASEQ { ch_gtf.map { [ [:], it ] } ) ch_multiqc_files = ch_multiqc_files.mix(DUPRADAR.out.multiqc.collect{it[1]}) - ch_versions = ch_versions.mix(DUPRADAR.out.versions.first()) } // Get RSeqC modules to run def rseqc_modules = params.rseqc_modules ? 
params.rseqc_modules.split(',').collect{ it.trim().toLowerCase() } : [] if (params.bam_csi_index) { - for (rseqc_module in ['read_distribution', 'inner_distance', 'tin']) { + ['read_distribution', 'inner_distance', 'tin'].each { rseqc_module -> if (rseqc_modules.contains(rseqc_module)) { rseqc_modules.remove(rseqc_module) } @@ -564,19 +544,18 @@ workflow RNASEQ { ch_multiqc_files = ch_multiqc_files.mix(BAM_RSEQC.out.readdistribution_txt.collect{it[1]}) ch_multiqc_files = ch_multiqc_files.mix(BAM_RSEQC.out.readduplication_pos_xls.collect{it[1]}) ch_multiqc_files = ch_multiqc_files.mix(BAM_RSEQC.out.tin_txt.collect{it[1]}) - ch_versions = ch_versions.mix(BAM_RSEQC.out.versions) // Compare predicted supplied or Salmon-predicted strand with what we get from RSeQC ch_strand_comparison = BAM_RSEQC.out.inferexperiment_txt .map { meta, strand_log -> def rseqc_inferred_strand = getInferexperimentStrandedness(strand_log, params.stranded_threshold, params.unstranded_threshold) - rseqc_strandedness = rseqc_inferred_strand.inferred_strandedness + def rseqc_strandedness = rseqc_inferred_strand.inferred_strandedness def status = 'fail' def multiqc_lines = [] if (meta.salmon_strand_analysis) { - salmon_strandedness = meta.salmon_strand_analysis.inferred_strandedness + def salmon_strandedness = meta.salmon_strand_analysis.inferred_strandedness if (salmon_strandedness == rseqc_strandedness && rseqc_strandedness != 'undetermined') { status = 'pass' @@ -634,7 +613,6 @@ workflow RNASEQ { params.save_kraken_unassigned ) ch_kraken_reports = KRAKEN2.out.report - ch_versions = ch_versions.mix(KRAKEN2.out.versions) if (params.contaminant_screening == 'kraken2') { ch_multiqc_files = ch_multiqc_files.mix(KRAKEN2.out.report.collect{it[1]}) @@ -643,7 +621,6 @@ workflow RNASEQ { ch_kraken_reports, params.kraken_db ) - ch_versions = ch_versions.mix(BRACKEN.out.versions) ch_multiqc_files = ch_multiqc_files.mix(BRACKEN.out.txt.collect{it[1]}) } } @@ -676,7 +653,6 @@ workflow RNASEQ { ) ch_counts_gene_length_scaled = QUANTIFY_PSEUDO_ALIGNMENT.out.counts_gene_length_scaled ch_multiqc_files = ch_multiqc_files.mix(QUANTIFY_PSEUDO_ALIGNMENT.out.multiqc.collect{it[1]}) - ch_versions = ch_versions.mix(QUANTIFY_PSEUDO_ALIGNMENT.out.versions) if (!params.skip_qc & !params.skip_deseq2_qc) { DESEQ2_QC_PSEUDO ( @@ -686,13 +662,14 @@ workflow RNASEQ { ) ch_multiqc_files = ch_multiqc_files.mix(DESEQ2_QC_PSEUDO.out.pca_multiqc.collect()) ch_multiqc_files = ch_multiqc_files.mix(DESEQ2_QC_PSEUDO.out.dists_multiqc.collect()) - ch_versions = ch_versions.mix(DESEQ2_QC_PSEUDO.out.versions) } } // // Collate and save software versions // + ch_versions = Channel.topic('versions') + softwareVersionsToYAML(ch_versions) .collectFile(storeDir: "${params.outdir}/pipeline_info", name: 'nf_core_rnaseq_software_mqc_versions.yml', sort: true, newLine: true) .set { ch_collated_versions } @@ -701,6 +678,8 @@ workflow RNASEQ { // MODULE: MultiQC // ch_multiqc_report = Channel.empty() + ch_multiqc_data = Channel.empty() + ch_multiqc_plots = Channel.empty() if (!params.skip_multiqc) { @@ -760,14 +739,39 @@ workflow RNASEQ { [] ) ch_multiqc_report = MULTIQC.out.report + ch_multiqc_data = MULTIQC.out.data + ch_multiqc_plots = MULTIQC.out.plots } emit: trim_status = ch_trim_status // channel: [id, boolean] map_status = ch_map_status // channel: [id, boolean] strand_status = ch_strand_status // channel: [id, boolean] - multiqc_report = ch_multiqc_report // channel: /path/to/multiqc_report.html - versions = ch_versions // channel: [ path(versions.yml) ] + + // 
TODO: !params.skip_alignment && params.aligner == 'star_salmon' + star_salmon = QUANTIFY_STAR_SALMON.out.results + star_salmon_deseq_qc = DESEQ2_QC_STAR_SALMON.out + + // TODO: !params.skip_alignment && params.aligner == 'star_rsem' + star_rsem = QUANTIFY_RSEM.out + star_rsem_deseq_qc = DESEQ2_QC_RSEM.out + + // TODO: !params.skip_alignment && params.aligner == 'hisat2' + hisat2 = FASTQ_ALIGN_HISAT2.out + + // TODO: are these per-sample or summary outputs? + // STRINGTIE_STRINGTIE.out >> "${params.aligner}/stringtie" + // SUBREAD_FEATURECOUNTS.out >> "${params.aligner}/featurecounts" + // MULTIQC_CUSTOM_BIOTYPE.out.tsv >> "${params.aligner}/featurecounts" + // BEDGRAPH_BEDCLIP_BEDGRAPHTOBIGWIG_FORWARD.out.bigwig >> "${params.aligner}/bigwig" + // BEDGRAPH_BEDCLIP_BEDGRAPHTOBIGWIG_REVERSE.out.bigwig >> "${params.aligner}/bigwig" + // QUALIMAP_RNASEQ.out.results >> "${params.aligner}/qualimap" + // DUPRADAR.out >> "${params.aligner}/dupradar" + + multiqc_report = ch_multiqc_report + multiqc_data = ch_multiqc_data + multiqc_plots = ch_multiqc_plots + } /* diff --git a/workflows/rnaseq/nextflow.config b/workflows/rnaseq/nextflow.config index a621b3bbe..3cbbef628 100644 --- a/workflows/rnaseq/nextflow.config +++ b/workflows/rnaseq/nextflow.config @@ -29,37 +29,14 @@ if (!params.skip_alignment && params.aligner == 'star_salmon') { process { withName: '.*:QUANTIFY_STAR_SALMON:SALMON_QUANT' { ext.args = { params.extra_salmon_quant_args ?: '' } - publishDir = [ - path: { "${params.outdir}/${params.aligner}" }, - mode: params.publish_dir_mode, - saveAs: { filename -> filename.equals('versions.yml') || filename.endsWith('_meta_info.json') || filename.endsWith('_format_counts.json') ? null : filename } - ] - } - - withName: '.*:QUANTIFY_STAR_SALMON:CUSTOM_TX2GENE' { - publishDir = [ - path: { "${params.outdir}/${params.aligner}" }, - mode: params.publish_dir_mode, - saveAs: { filename -> filename.equals('versions.yml') ? null : filename } - ] } withName: '.*:QUANTIFY_STAR_SALMON:TXIMETA_TXIMPORT' { ext.prefix = { "${quant_type}.merged" } - publishDir = [ - path: { "${params.outdir}/${params.aligner}" }, - mode: params.publish_dir_mode, - saveAs: { filename -> filename.equals('versions.yml') ? null : filename } - ] } withName: '.*:QUANTIFY_STAR_SALMON:SE_.*' { ext.prefix = { "${params.pseudo_aligner}.merged" } - publishDir = [ - path: { "${params.outdir}/${params.aligner}" }, - mode: params.publish_dir_mode, - saveAs: { filename -> filename.equals('versions.yml') || filename.endsWith('.log') ? null : filename } - ] } withName: '.*:QUANTIFY_STAR_SALMON:SE_GENE' { ext.prefix = { "${params.pseudo_aligner}.merged.gene_counts" } @@ -80,58 +57,18 @@ if (!params.skip_alignment && params.aligner == 'star_salmon') { withName: 'NFCORE_RNASEQ:RNASEQ:BAM_DEDUP_UMI_(STAR|HISAT2):SAMTOOLS_SORT' { ext.args = '-n' ext.prefix = { "${meta.id}.umi_dedup.transcriptome" } - publishDir = [ - path: { params.save_align_intermeds || params.save_umi_intermeds ? "${params.outdir}/${params.aligner}" : params.outdir }, - mode: params.publish_dir_mode, - pattern: '*.bam', - saveAs: { params.save_align_intermeds || params.save_umi_intermeds ? 
it : null } - ] } withName: 'NFCORE_RNASEQ:RNASEQ:BAM_DEDUP_UMI_(STAR|HISAT2):UMITOOLS_PREPAREFORRSEM' { ext.prefix = { "${meta.id}.umi_dedup.transcriptome.filtered" } - publishDir = [ - [ - path: { "${params.outdir}/${params.aligner}/umitools/prepare_for_salmon_log" }, - mode: params.publish_dir_mode, - pattern: '*.log' - ], - [ - path: { params.save_align_intermeds || params.save_umi_intermeds ? "${params.outdir}/${params.aligner}" : params.outdir }, - mode: params.publish_dir_mode, - pattern: '*.bam', - saveAs: { params.save_align_intermeds || params.save_umi_intermeds ? it : null } - ] - ] } withName: 'NFCORE_RNASEQ:RNASEQ:BAM_DEDUP_UMI_STAR:BAM_SORT_STATS_SAMTOOLS:SAMTOOLS_SORT' { ext.prefix = { "${meta.id}.transcriptome.sorted" } - publishDir = [ - path: { params.save_align_intermeds || params.save_umi_intermeds ? "${params.outdir}/${params.aligner}" : params.outdir }, - mode: params.publish_dir_mode, - pattern: '*.bam', - saveAs: { params.save_align_intermeds || params.save_umi_intermeds ? it : null } - ] - } - - withName: 'NFCORE_RNASEQ:RNASEQ:BAM_DEDUP_UMI_STAR:BAM_SORT_STATS_SAMTOOLS:SAMTOOLS_INDEX' { - publishDir = [ - path: { params.save_align_intermeds || params.save_umi_intermeds ? "${params.outdir}/${params.aligner}" : params.outdir }, - mode: params.publish_dir_mode, - pattern: '*.bai', - saveAs: { params.save_align_intermeds || params.save_umi_intermeds ? it : null } - ] } withName: 'NFCORE_RNASEQ:RNASEQ:BAM_DEDUP_UMI_STAR:BAM_SORT_STATS_SAMTOOLS:BAM_STATS_SAMTOOLS:.*' { ext.prefix = { "${meta.id}.transcriptome.sorted.bam" } - publishDir = [ - path: { params.save_align_intermeds || params.save_umi_intermeds ? "${params.outdir}/${params.aligner}/samtools_stats" : params.outdir }, - mode: params.publish_dir_mode, - pattern: '*.{stats,flagstat,idxstats}', - saveAs: { params.save_align_intermeds || params.save_umi_intermeds ? it : null } - ] } // Use the same umi_dedup prefix for umitools and umicollapse @@ -139,65 +76,6 @@ if (!params.skip_alignment && params.aligner == 'star_salmon') { withName: '.*:BAM_DEDUP_STATS_SAMTOOLS_UMI(COLLAPSE|TOOLS)_TRANSCRIPTOME:UMI(COLLAPSE|TOOLS_DEDUP)' { ext.prefix = { "${meta.id}.umi_dedup.transcriptome.sorted" } } - - // Publishing logic for umitools: - - withName: '.*:BAM_DEDUP_STATS_SAMTOOLS_UMITOOLS_TRANSCRIPTOME:UMITOOLS_DEDUP' { - publishDir = [ - [ - path: { params.save_align_intermeds || params.save_umi_intermeds ? "${params.outdir}/${params.aligner}" : params.outdir }, - mode: params.publish_dir_mode, - pattern: '*.bam', - saveAs: { params.save_align_intermeds || params.save_umi_intermeds ? it : null } - ], - [ - path: { "${params.outdir}/${params.aligner}/umitools/transcriptomic_dedup_log" }, - mode: params.publish_dir_mode, - pattern: '*.log' - ], - [ - path: { "${params.outdir}/${params.aligner}/umitools" }, - mode: params.publish_dir_mode, - pattern: '*.tsv' - ] - ] - } - - // Publishing logic for umicollapse - - withName: '.*:BAM_DEDUP_STATS_SAMTOOLS_UMICOLLAPSE_TRANSCRIPTOME:UMICOLLAPSE' { - publishDir = [ - [ - path: { params.save_align_intermeds || params.save_umi_intermeds ? "${params.outdir}/${params.aligner}" : params.outdir }, - mode: params.publish_dir_mode, - pattern: '*.bam', - saveAs: { params.save_align_intermeds || params.save_umi_intermeds ? 
it : null } - ], - [ - path: { "${params.outdir}/${params.aligner}/umicollapse/transcriptomic_dedup_log" }, - mode: params.publish_dir_mode, - pattern: '*.log' - ] - ] - } - - withName: '.*:BAM_DEDUP_STATS_SAMTOOLS_UMI(COLLAPSE|TOOLS)_TRANSCRIPTOME:SAMTOOLS_INDEX' { - publishDir = [ - path: { params.save_align_intermeds || params.save_umi_intermeds ? "${params.outdir}/${params.aligner}" : params.outdir }, - mode: params.publish_dir_mode, - pattern: '*.bai', - saveAs: { params.save_align_intermeds || params.save_umi_intermeds ? it : null } - ] - } - - withName: '.*:BAM_DEDUP_STATS_SAMTOOLS_UMI(COLLAPSE|TOOLS)_TRANSCRIPTOME:BAM_STATS_SAMTOOLS:.*' { - ext.prefix = { "${meta.id}.umi_dedup.transcriptome.sorted.bam" } - publishDir = [ - path: { "${params.outdir}/${params.aligner}/samtools_stats" }, - mode: params.publish_dir_mode, - pattern: '*.{stats,flagstat,idxstats}' - ] - } } } } @@ -406,11 +284,6 @@ if (!params.skip_alignment && params.aligner == 'star_salmon') { params.deseq2_vst ? '--vst TRUE' : '' ].join(' ').trim() } ext.args2 = 'star_salmon' - publishDir = [ - path: { "${params.outdir}/${params.aligner}/deseq2_qc" }, - mode: params.publish_dir_mode, - pattern: "*{RData,pca.vals.txt,plots.pdf,sample.dists.txt,size_factors,log}" - ] } } } @@ -427,11 +300,6 @@ if (!params.skip_alignment && params.aligner == 'star_rsem') { params.deseq2_vst ? '--vst TRUE' : '' ].join(' ').trim() } ext.args2 = 'star_rsem' - publishDir = [ - path: { "${params.outdir}/${params.aligner}/deseq2_qc" }, - mode: params.publish_dir_mode, - pattern: "*{RData,pca.vals.txt,plots.pdf,sample.dists.txt,size_factors,log}" - ] } } } @@ -492,11 +360,6 @@ if (!params.skip_pseudo_alignment && params.pseudo_aligner) { params.deseq2_vst ? '--vst TRUE' : '' ].join(' ').trim() } ext.args2 = { params.pseudo_aligner } - publishDir = [ - path: { "${params.outdir}/${params.pseudo_aligner}/deseq2_qc" }, - mode: params.publish_dir_mode, - pattern: "*{RData,pca.vals.txt,plots.pdf,sample.dists.txt,size_factors,log}" - ] } } }
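Taken together, the config hunks move every publishing decision out of per-process publishDir blocks: nextflow.config sets a single outputDir and a global publish mode, module-level config keeps only tool arguments via ext.args, and per-target directories (including conditional ones such as the MultiQC path) live in the output block of main.nf. A condensed sketch of how the pieces fit, reusing fragments shown above:

    // nextflow.config: one base directory and one publish mode for everything
    outputDir            = params.outdir
    workflow.output.mode = params.publish_dir_mode   // e.g. 'copy' or 'symlink'

    process {
        withName: 'SALMON_INDEX' {
            ext.args = params.gencode ? '--gencode' : ''   // tool tuning only, no publishDir
        }
    }

    // main.nf: target directories, including conditional ones, belong to the output block
    output {
        multiqc_report {
            path params.skip_alignment ? 'multiqc' : "multiqc/${params.aligner}"
        }
    }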