diff --git a/.github/workflows/awsfulltest.yml b/.github/workflows/awsfulltest.yml
index e7d98850..4dc32a60 100644
--- a/.github/workflows/awsfulltest.yml
+++ b/.github/workflows/awsfulltest.yml
@@ -14,10 +14,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Launch workflow via tower
         uses: seqeralabs/action-tower-launch@922e5c8d5ac4e918107ec311d2ebbd65e5982b3d # v2
-        # TODO nf-core: You can customise AWS full pipeline tests as required
-        # Add full size test data (but still relatively small datasets for few samples)
-        # on the `test_full.config` test runs with only one set of parameters
with:
workspace_id: ${{ secrets.TOWER_WORKSPACE_ID }}
access_token: ${{ secrets.TOWER_ACCESS_TOKEN }}
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index f9546d96..89155ce2 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -22,10 +22,24 @@ jobs:
if: "${{ github.event_name != 'push' || (github.event_name == 'push' && github.repository == 'nf-core/taxprofiler') }}"
runs-on: ubuntu-latest
strategy:
+ fail-fast: false
matrix:
NXF_VER:
- "23.04.0"
- "latest-everything"
+ tags:
+ - "test"
+ - "test_nopreprocessing"
+ - "test_noprofiling"
+ - "test_krakenuniq"
+ - "test_malt"
+ - "test_motus"
+ - "test_falco"
+ - "test_fastp"
+ - "test_adapterremoval"
+ - "test_bbduk"
+ - "test_prinseqplusplus"
+
steps:
- name: Check out pipeline code
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4
@@ -35,12 +49,27 @@ jobs:
with:
version: "${{ matrix.NXF_VER }}"
       - name: Disk space cleanup
         uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1
+
+      - name: Show current locale
+        run: locale
+
+      - name: Set UTF-8 enabled locale
+        run: |
+          sudo locale-gen en_US.UTF-8
+          sudo update-locale LANG=en_US.UTF-8
- name: Run pipeline with test data
- # TODO nf-core: You can customise CI pipeline run tests as required
- # For example: adding multiple test runs with different parameters
- # Remember that you can parallelise this by using strategy.matrix
run: |
- nextflow run ${GITHUB_WORKSPACE} -profile test,docker --outdir ./results
+ if [[ "${{ matrix.tags }}" == "test_motus" ]]; then
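+            # mOTUs has no bundled test database, so the test run fetches one with the
+            # upstream downloadDB.py helper and registers it via an ad-hoc databases.csv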
+ wget https://raw.githubusercontent.com/motu-tool/mOTUs/master/motus/downloadDB.py
+ python downloadDB.py > download_db_log.txt
+ echo 'tool,db_name,db_params,db_path' > 'database_motus.csv'
+ echo 'motus,db_mOTU,,db_mOTU' >> 'database_motus.csv'
+ nextflow run ${GITHUB_WORKSPACE} -profile docker,${{ matrix.tags }} --databases ./database_motus.csv --outdir ./results_${{ matrix.tags }};
+ else
+ nextflow run ${GITHUB_WORKSPACE} -profile docker,${{ matrix.tags }} --outdir ./results_${{ matrix.tags }};
+ fi
diff --git a/.prettierignore b/.prettierignore
index 437d763d..abb4b4d6 100644
--- a/.prettierignore
+++ b/.prettierignore
@@ -10,3 +10,4 @@ testing/
testing*
*.pyc
bin/
+tests/
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 313368e1..99b7669b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,10 +3,189 @@
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-## v1.1.6dev - [date]
+## v1.1.6dev - [unreleased]
+
+### `Added`
+
+### `Fixed`
+
+### `Dependencies`
+
+### `Deprecated`
+
+## v1.1.5 - Augmented Akita Patch [2024-02-08]
+
+### `Added`
+
+- [#439](https://github.com/nf-core/taxprofiler/pull/439) Read deduplication with fastp (added by @maxibor)
+- [#440](https://github.com/nf-core/taxprofiler/pull/440) Include mention of pre-built kaiju databases in tutorial.md (added by @Joon-Klaps)
+- [#442](https://github.com/nf-core/taxprofiler/pull/442) Updated to nf-core pipeline template v2.12 (added by @sofstam)
+
+### `Fixed`
+
+- [#444](https://github.com/nf-core/taxprofiler/pull/444) Centrifuge now uses dedicated tmp directory to hopefully prevent mkfifo clashes (❤️ to @erinyoung for reporting, fix by @jfy133)
+
+### `Dependencies`
+
+| Tool | Previous version | New version |
+| ---------- | ---------------- | ----------- |
+| Centrifuge | 1.0.4_beta | 1.0.4.1 |
+
+### `Deprecated`
+
+## v1.1.4 - Augmented Akita Patch [2024-01-24]
+
+### `Added`
+
+### `Fixed`
+
+- [#431](https://github.com/nf-core/modules/pull/4781#event-11555493525) Updated kaiju2table module to report taxon names (fix by @Joon-Klaps)
+- [#430](https://github.com/nf-core/taxprofiler/pull/430) Fix the fastq output in the module LONGREAD_HOSTREMOVAL. (fix by @LilyAnderssonLee)
+
+### `Dependencies`
+
+| Tool | Previous version | New version |
+| ----- | ---------------- | ----------- |
+| kaiju | 1.8.2 | 1.10.0 |
+
+### `Deprecated`
+
+## v1.1.3 - Augmented Akita Patch [2024-01-12]
+
+### `Added`
+
+- [#424](https://github.com/nf-core/taxprofiler/pull/424) Updated to nf-core pipeline template v2.11.1 (added by @LilyAnderssonLee & @sofstam)
+
+### `Fixed`
+
+- [#419](https://github.com/nf-core/taxprofiler/pull/419) Added improved syntax highlighting for tables in documentation (fix by @mashehu)
+- [#421](https://github.com/nf-core/taxprofiler/pull/421) Updated the krakenuniq/preloadedkrakenuniq module that contained a fix for saving the output reads (❤️ to @SannaAb for reporting, fix by @Midnighter)
+- [#427](https://github.com/nf-core/taxprofiler/pull/427) Fixed preprint information in the recommended methods text (fix by @jfy133)
+
+### `Dependencies`
+
+| Tool | Previous version | New version |
+| ------------- | ---------------- | ----------- |
+| multiqc | 1.15 | 1.19 |
+| fastqc | 11.9 | 12.1 |
+| nf-validation | unpinned | 1.1.3 |
+
+## v1.1.2 - Augmented Akita Patch [2023-10-27]
+
+### `Added`
+
+- [#408](https://github.com/nf-core/taxprofiler/pull/408) Added preprint citation information to README and manifest (added by @jfy133)
+
+### `Fixed`
+
+- [#405](https://github.com/nf-core/taxprofiler/pull/405) Fix database to tool mismatching in KAIJU2KRONA input (❤️ to @MajoroMask for reporting, fix by @jfy133)
+- [#406](https://github.com/nf-core/taxprofiler/pull/406) Fix overwriting of bracken-derived kraken2 outputs when the database name is shared between Bracken/Kraken2. (❤️ to @MajoroMask for reporting, fix by @jfy133)
+- [#409](https://github.com/nf-core/taxprofiler/pull/409) Fix a NullPointerException error occurring occasionally in older version of MEGAN's rma2info (❤️ to @MajoroMask for reporting, fix by @jfy133)
+
+### `Dependencies`
+
+| Tool | Previous version | New version |
+| -------------- | ---------------- | ----------- |
+| megan/rma2info | 6.21.7 | 6.24.20 |
+
+### `Deprecated`
+
+## v1.1.1 - Augmented Akita Patch [2023-10-11]
+
+### `Added`
+
+- [#379](https://github.com/nf-core/taxprofiler/pull/379) Added support for previously missing Bracken-corrected Kraken2 report as output (added by @hkaspersen & @jfy133)
+- [#380](https://github.com/nf-core/taxprofiler/pull/380) Updated to nf-core pipeline template v2.10 (added by @LilyAnderssonLee & @sofstam)
+- [#393](https://github.com/nf-core/taxprofiler/pull/393) Add validation check for a taxpasta taxonomy directory if `--taxpasta_add_*` parameters requested (♥️ to @alimalrashed for reporting, added by @jfy133)
+
+### `Fixed`
+
+- [#383](https://github.com/nf-core/taxprofiler/pull/383) Update the module of KrakenUniq to the latest to account for edge case bugs where FASTQ input was mis-detected as wrong format (❤️ to @asafpr for reporting and solution, fixed by @LilyAnderssonLee)
+- [#392](https://github.com/nf-core/taxprofiler/pull/392) Update the module of Taxpasta to support adding taxa information to results (❤️ to @SannaAb for reporting, fixed by @Midnighter)
+
+### `Dependencies`
+
+| Tool | Previous version | New version |
+| ---------- | ---------------- | ----------- |
+| KrakenUniq | 1.0.2 | 1.0.4 |
+| taxpasta | 0.6.0 | 0.6.1 |
+
+### `Deprecated`
+
+## v1.1.0 - Augmented Akita [2023-09-19]
+
+### `Added`
+
+- [#298](https://github.com/nf-core/taxprofiler/pull/298) **New classifier** [ganon](https://pirovc.github.io/ganon/) (added by @jfy133)
+- [#312](https://github.com/nf-core/taxprofiler/pull/312) **New classifier** [KMCP](https://github.com/shenwei356/kmcp) (added by @sofstam)
+- [#318](https://github.com/nf-core/taxprofiler/pull/318) **New classifier** [MetaPhlAn4](https://github.com/biobakery/MetaPhlAn) (MetaPhlAn3 support remains) (added by @LilyAnderssonLee)
+- [#276](https://github.com/nf-core/taxprofiler/pull/276) Implemented batching in the KrakenUniq samples processing (added by @Midnighter)
+- [#272](https://github.com/nf-core/taxprofiler/pull/272) Add saving of final 'analysis-ready-reads' to dedicated directory (❤️ to @alexhbnr for request, added by @jfy133)
+- [#303](https://github.com/nf-core/taxprofiler/pull/303) Add support for taxpasta profile standardisation in single sample pipeline runs (❤️ to @artur-matysik for request, added by @jfy133)
+- [#308](https://github.com/nf-core/taxprofiler/pull/308) Add citations and bibliographic information to the MultiQC methods text of tools used in a given pipeline run (added by @jfy133)
+- [#315](https://github.com/nf-core/taxprofiler/pull/315) Updated to nf-core pipeline template v2.9 (added by @sofstam & @jfy133)
+- [#321](https://github.com/nf-core/taxprofiler/pull/321) Added support for virus hit expansion in Kaiju (❤️ to @dnlrxn for requesting, added by @jfy133)
+- [#325](https://github.com/nf-core/taxprofiler/pull/325) Add ability to skip sequencing quality control tools (❤️ to @vinisalazar for requesting, added by @jfy133)
+- [#345](https://github.com/nf-core/taxprofiler/pull/345) Add simple tutorial to explain how to get up and running with an nf-core/taxprofiler run (added by @jfy133)
+- [#355](https://github.com/nf-core/taxprofiler/pull/355) Add support for TAXPASTA's `--add-rank-lineage` to output (❤️ to @MajoroMask for request, added by @Midnighter, @sofstam, @jfy133)
+- [#368](https://github.com/nf-core/taxprofiler/pull/368/) Add the ability to ignore profile errors caused by empty profiles and other validation errors when merging multiple profiles using TAXPASTA (added by @Midnighter and @LilyAnderssonLee)
+
+### `Fixed`
+
+- [#271](https://github.com/nf-core/taxprofiler/pull/271) Improved standardised table generation documentation for mOTUs manual database download tutorial (♥ to @prototaxites for reporting, fix by @jfy133)
+- [#269](https://github.com/nf-core/taxprofiler/pull/269) Reduced output files in AWS full test output due to very large files (fix by @jfy133)
+- [#270](https://github.com/nf-core/taxprofiler/pull/270) Fixed warning for host removal index parameter, and improved index checks (♥ to @prototaxites for reporting, fix by @jfy133)
+- [#274](https://github.com/nf-core/taxprofiler/pull/274) Substituted the samtools/bam2fq module with samtools/fastq module (fix by @sofstam)
+- [#275](https://github.com/nf-core/taxprofiler/pull/275) Replaced function used for error reporting to more Nextflow friendly method (fix by @jfy133)
+- [#285](https://github.com/nf-core/taxprofiler/pull/285) Fixed overly large log files in Kraken2 output (♥ to @prototaxites for reporting, fix by @Midnighter & @jfy133)
+- [#286](https://github.com/nf-core/taxprofiler/pull/286) Runtime optimisation of MultiQC step via improved log file processing (fix by @Midnighter & @jfy133)
+- [#289](https://github.com/nf-core/taxprofiler/pull/289) Pipeline updated to nf-core template 2.8 (fix by @Midnighter & @jfy133)
+- [#290](https://github.com/nf-core/taxprofiler/pull/290) Minor database input documentation improvements (♥ to @alneberg for reporting, fix by @jfy133)
+- [#305](https://github.com/nf-core/taxprofiler/pull/305) Fix docker/podman registry definition for tower compatibility (fix by @adamrtalbot, @jfy133)
+- [#304](https://github.com/nf-core/taxprofiler/pull/304) Correct mistake in kaiju2table documentation, only single rank can be supplied (♥ to @artur-matysik for reporting, fix by @jfy133)
+- [#307](https://github.com/nf-core/taxprofiler/pull/307) Fix databases being sometimes associated with the wrong tool (e.g. Kaiju) (fix by @jfy133, @Midnighter and @LilyAnderssonLee)
+- [#313](https://github.com/nf-core/taxprofiler/pull/313) Fix pipeline not providing error when database sheet does not have a header (♥ to @noah472 for reporting, fix by @jfy133)
+- [#330](https://github.com/nf-core/taxprofiler/pull/330) Added better tagging to allow disambiguation of Kraken2 steps of Kraken2 vs Bracken (♥ to @MajoroMask for requesting, added by @jfy133)
+- [#334](https://github.com/nf-core/taxprofiler/pull/334) Increase the memory of the FALCO process to 4GB (fix by @LilyAnderssonLee)
+- [#332](https://github.com/nf-core/taxprofiler/pull/332) Improved meta map stability for more robust pipeline resuming (fix by @jfy133)
+- [#338](https://github.com/nf-core/taxprofiler/pull/338) Fixed wrong file 'out' file going to `centrifuge kreport` module (♥ to @LilyAnderssonLee for reporting, fix by @jfy133)
+- [#342](https://github.com/nf-core/taxprofiler/pull/342) Fixed docs/usage to correctly list the required database files for Bracken and tips to obtain Kraken2 databases (fix by @husensofteng)
+- [#350](https://github.com/nf-core/taxprofiler/pull/350) Reorganize the CI tests into separate profiles in preparation for implementation of nf-test (fix by @LilyAnderssonLee)
+- [#364](https://github.com/nf-core/taxprofiler/pull/364) Add autoMounts to apptainer profile in nextflow.config (♥ to @hkaspersen for reporting, fix by @LilyAnderssonLee)
+- [#372](https://github.com/nf-core/taxprofiler/pull/372) Update modules to use quay.io nf-core mirrored containers (♥ to @maxulysse for pointing out, fix by @LilyAnderssonLee and @jfy133)
+
+### `Dependencies`
+
+| Tool | Previous version | New version |
+| --------- | ---------------- | ----------- |
+| MultiQC | 1.13 | 1.15 |
+| TAXPASTA | 0.2.3 | 0.6.0 |
+| MetaPhlAn | 3.0.12 | 4.0.6 |
+| fastp | 0.23.2 | 0.23.4 |
+| samtools | 1.16.1 | 1.17 |
+
+### `Deprecated`
+
+- [#338](https://github.com/nf-core/taxprofiler/pull/338) Updated Centrifuge module to not generate (undocumented) SAM alignments by default if --save_centrifuge_reads supplied, due to a Centrifuge bug modifying profile header. SAM alignments can still be generated if `--out-fmt` supplied in `database.csv` (♥ to @LilyAnderssonLee for reporting, fix by @jfy133)
+
+## v1.0.1 - Dodgy Dachshund Patch [2023-05-15]
+
+### `Added`
+
+### `Fixed`
+
+- [#291](https://github.com/nf-core/taxprofiler/pull/291) - Fix Taxpasta not receiving taxonomy directory (❤️ to @SannaAb for reporting, fix by @jfy133)
+
+## v1.0.0 - Dodgy Dachshund [2023-03-13]
Initial release of nf-core/taxprofiler, created with the [nf-core](https://nf-co.re/) template.
+- Add read quality control (sequencing QC, adapter removal and merging)
+- Add read complexity filtering
+- Add host-reads removal step
+- Add run merging
+- Add taxonomic classification
+- Add taxon table standardisation
+- Add post-classification visualisation
+
-### `Added`
-### `Fixed`
diff --git a/CITATIONS.md b/CITATIONS.md
index e10d62ae..b82fe7d7 100644
--- a/CITATIONS.md
+++ b/CITATIONS.md
@@ -2,11 +2,11 @@
## [nf-core](https://pubmed.ncbi.nlm.nih.gov/32055031/)
-> Ewels PA, Peltzer A, Fillinger S, Patel H, Alneberg J, Wilm A, Garcia MU, Di Tommaso P, Nahnsen S. The nf-core framework for community-curated bioinformatics pipelines. Nat Biotechnol. 2020 Mar;38(3):276-278. doi: 10.1038/s41587-020-0439-x. PubMed PMID: 32055031.
+> Ewels, P. A., Peltzer, A., Fillinger, S., Patel, H., Alneberg, J., Wilm, A., Garcia, M. U., Di Tommaso, P., & Nahnsen, S. (2020). The nf-core framework for community-curated bioinformatics pipelines. In Nature Biotechnology (Vol. 38, Issue 3). https://doi.org/10.1038/s41587-020-0439-x
## [Nextflow](https://pubmed.ncbi.nlm.nih.gov/28398311/)
-> Di Tommaso P, Chatzou M, Floden EW, Barja PP, Palumbo E, Notredame C. Nextflow enables reproducible computational workflows. Nat Biotechnol. 2017 Apr 11;35(4):316-319. doi: 10.1038/nbt.3820. PubMed PMID: 28398311.
+> Di Tommaso, P., Chatzou, M., Floden, E. W., Barja, P. P., Palumbo, E., & Notredame, C. (2017). Nextflow enables reproducible computational workflows. In Nature Biotechnology (Vol. 35, Issue 4). https://doi.org/10.1038/nbt.3820
## Pipeline tools
@@ -16,7 +16,103 @@
- [MultiQC](https://pubmed.ncbi.nlm.nih.gov/27312411/)
- > Ewels P, Magnusson M, Lundin S, Käller M. MultiQC: summarize analysis results for multiple tools and samples in a single report. Bioinformatics. 2016 Oct 1;32(19):3047-8. doi: 10.1093/bioinformatics/btw354. Epub 2016 Jun 16. PubMed PMID: 27312411; PubMed Central PMCID: PMC5039924.
+ > Ewels, P., Magnusson, M., Lundin, S., & Käller, M. (2016). MultiQC: Summarize analysis results for multiple tools and samples in a single report. Bioinformatics, 32(19). https://doi.org/10.1093/bioinformatics/btw354
+
+- [falco](https://doi.org/10.12688/f1000research.21142.2)
+
+ > de Sena Brandine, G., & Smith, A. D. (2021). Falco: high-speed FastQC emulation for quality control of sequencing data. F1000Research, 8(1874), 1874. https://doi.org/10.12688/f1000research.21142.2
+
+- [fastp](https://doi.org/10.1093/bioinformatics/bty560)
+
+  > Chen, S., Zhou, Y., Chen, Y., & Gu, J. (2018). fastp: an ultra-fast all-in-one FASTQ preprocessor. Bioinformatics, 34(17), i884–i890. https://doi.org/10.1093/bioinformatics/bty560
+
+- [AdapterRemoval2](https://doi.org/10.1186/s13104-016-1900-2)
+
+ > Schubert, M., Lindgreen, S., & Orlando, L. (2016). AdapterRemoval v2: rapid adapter trimming, identification, and read merging. BMC Research Notes, 9, 88. https://doi.org/10.1186/s13104-016-1900-2
+
+- [Porechop](https://github.com/rrwick/Porechop)
+
+ > Wick, R. R., Judd, L. M., Gorrie, C. L., & Holt, K. E. (2017). Completing bacterial genome assemblies with multiplex MinION sequencing. Microbial Genomics, 3(10), e000132. https://doi.org/10.1099/mgen.0.000132
+
+- [Filtlong](https://github.com/rrwick/Filtlong)
+
+ > Wick R (2021) Filtlong, URL: https://github.com/rrwick/Filtlong
+
+- [BBTools](http://sourceforge.net/projects/bbmap/)
+
+ > Bushnell B. (2022) BBMap, URL: http://sourceforge.net/projects/bbmap/
+
+- [PRINSEQ++](https://doi.org/10.7287/peerj.preprints.27553v1)
+
+ > Cantu, V. A., Sadural, J., & Edwards, R. (2019). PRINSEQ++, a multi-threaded tool for fast and efficient quality control and preprocessing of sequencing datasets (e27553v1). PeerJ Preprints. https://doi.org/10.7287/peerj.preprints.27553v1
+
+- [Bowtie2](https://doi.org/10.1038/nmeth.1923)
+
+ > Langmead, B., & Salzberg, S. L. (2012). Fast gapped-read alignment with Bowtie 2. Nature Methods, 9(4), 357–359. https://doi.org/10.1038/nmeth.1923
+
+- [minimap2](https://doi.org/10.1093/bioinformatics/bty191)
+
+  > Li, H. (2018). Minimap2: pairwise alignment for nucleotide sequences. Bioinformatics, 34(18), 3094–3100. https://doi.org/10.1093/bioinformatics/bty191
+
+- [SAMTools](https://doi.org/10.1093/gigascience/giab008)
+
+ > Danecek, P., Bonfield, J. K., Liddle, J., Marshall, J., Ohan, V., Pollard, M. O., Whitwham, A., Keane, T., McCarthy, S. A., Davies, R. M., & Li, H. (2021). Twelve years of SAMtools and BCFtools. GigaScience, 10(2). https://doi.org/10.1093/gigascience/giab008
+
+- [Bracken](https://doi.org/10.7717/peerj-cs.104)
+
+ > Lu, J., Breitwieser, F. P., Thielen, P., & Salzberg, S. L. (2017). Bracken: estimating species abundance in metagenomics data. PeerJ. Computer Science, 3(e104), e104. https://doi.org/10.7717/peerj-cs.104
+
+- [Kraken2](https://doi.org/10.1186/s13059-019-1891-0)
+
+ > Wood, D. E., Lu, J., & Langmead, B. (2019). Improved metagenomic analysis with Kraken 2. Genome Biology, 20(1), 257. https://doi.org/10.1186/s13059-019-1891-0
+
+- [KrakenUniq](https://doi.org/10.1186/s13059-018-1568-0)
+
+ > Breitwieser, F. P., Baker, D. N., & Salzberg, S. L. (2018). KrakenUniq: confident and fast metagenomics classification using unique k-mer counts. Genome Biology, 19(1), 198. https://doi.org/10.1186/s13059-018-1568-0
+
+- [MetaPhlAn](https://doi.org/10.1038/s41587-023-01688-w)
+
+ > Blanco-Míguez, A., Beghini, F., Cumbo, F., McIver, L. J., Thompson, K. N., Zolfo, M., Manghi, P., Dubois, L., Huang, K. D., Thomas, A. M., Nickols, W. A., Piccinno, G., Piperni, E., Punčochář, M., Valles-Colomer, M., Tett, A., Giordano, F., Davies, R., Wolf, J., … Segata, N. (2023). Extending and improving metagenomic taxonomic profiling with uncharacterized species using MetaPhlAn 4. Nature Biotechnology, 1–12. https://doi.org/10.1038/s41587-023-01688-w
+
+- [MALT](https://doi.org/10.1038/s41559-017-0446-6)
+
+ > Vågene, Å. J., Herbig, A., Campana, M. G., Robles García, N. M., Warinner, C., Sabin, S., Spyrou, M. A., Andrades Valtueña, A., Huson, D., Tuross, N., Bos, K. I., & Krause, J. (2018). Salmonella enterica genomes from victims of a major sixteenth-century epidemic in Mexico. Nature Ecology & Evolution, 2(3), 520–528. https://doi.org/10.1038/s41559-017-0446-6
+
+- [MEGAN](https://doi.org/10.1371/journal.pcbi.1004957)
+
+ > Huson, D. H., Beier, S., Flade, I., Górska, A., El-Hadidi, M., Mitra, S., Ruscheweyh, H.-J., & Tappu, R. (2016). MEGAN Community Edition - Interactive Exploration and Analysis of Large-Scale Microbiome Sequencing Data. PLoS Computational Biology, 12(6), e1004957. https://doi.org/10.1371/journal.pcbi.1004957
+
+- [DIAMOND](https://doi.org/10.1038/nmeth.3176)
+
+ > Buchfink, B., Xie, C., & Huson, D. H. (2015). Fast and sensitive protein alignment using DIAMOND. Nature Methods, 12(1), 59–60. https://doi.org/10.1038/nmeth.3176
+
+- [Centrifuge](https://doi.org/10.1101/gr.210641.116)
+
+ > Kim, D., Song, L., Breitwieser, F. P., & Salzberg, S. L. (2016). Centrifuge: rapid and sensitive classification of metagenomic sequences. Genome Research, 26(12), 1721–1729. https://doi.org/10.1101/gr.210641.116
+
+- [Kaiju](https://doi.org/10.1038/ncomms11257)
+
+ > Menzel, P., Ng, K. L., & Krogh, A. (2016). Fast and sensitive taxonomic classification for metagenomics with Kaiju. Nature Communications, 7, 11257. https://doi.org/10.1038/ncomms11257
+
+- [mOTUs](https://doi.org/10.1186/s40168-022-01410-z)
+
+ > Ruscheweyh, H.-J., Milanese, A., Paoli, L., Karcher, N., Clayssen, Q., Keller, M. I., Wirbel, J., Bork, P., Mende, D. R., Zeller, G., & Sunagawa, S. (2022). Cultivation-independent genomes greatly expand taxonomic-profiling capabilities of mOTUs across various environments. Microbiome, 10(1), 212. https://doi.org/10.1186/s40168-022-01410-z
+
+- [KMCP](https://doi.org/10.1093/bioinformatics/btac845)
+
+ > Shen, W., Xiang, H., Huang, T., Tang, H., Peng, M., Cai, D., Hu, P., & Ren, H. (2023). KMCP: accurate metagenomic profiling of both prokaryotic and viral populations by pseudo-mapping. Bioinformatics (Oxford, England), 39(1). https://doi.org/10.1093/bioinformatics/btac845
+
+- [ganon](https://doi.org/10.1093/bioinformatics/btaa458)
+
+ > Piro, V. C., Dadi, T. H., Seiler, E., Reinert, K., & Renard, B. Y. (2020). Ganon: Precise metagenomics classification against large and up-to-date sets of reference sequences. Bioinformatics (Oxford, England), 36(Suppl_1), i12–i20. https://doi.org/10.1093/bioinformatics/btaa458
+
+- [Krona](https://doi.org/10.1186/1471-2105-12-385)
+
+ > Ondov, B. D., Bergman, N. H., & Phillippy, A. M. (2011). Interactive metagenomic visualization in a Web browser. BMC Bioinformatics, 12. https://doi.org/10.1186/1471-2105-12-385
+
+- [TAXPASTA](https://doi.org/10.21105/joss.05627)
+
+ > Beber, M. E., Borry, M., Stamouli, S., & Fellows Yates, J. A. (2023). TAXPASTA: TAXonomic Profile Aggregation and STAndardisation. Journal of Open Source Software, 8(87), 5627. https://doi.org/10.21105/joss.05627
## Software packaging/containerisation tools
@@ -26,11 +122,11 @@
- [Bioconda](https://pubmed.ncbi.nlm.nih.gov/29967506/)
- > Grüning B, Dale R, Sjödin A, Chapman BA, Rowe J, Tomkins-Tinch CH, Valieris R, Köster J; Bioconda Team. Bioconda: sustainable and comprehensive software distribution for the life sciences. Nat Methods. 2018 Jul;15(7):475-476. doi: 10.1038/s41592-018-0046-7. PubMed PMID: 29967506.
+ > Dale, R., Grüning, B., Sjödin, A., Rowe, J., Chapman, B. A., Tomkins-Tinch, C. H., Valieris, R., Batut, B., Caprez, A., Cokelaer, T., Yusuf, D., Beauchamp, K. A., Brinda, K., Wollmann, T., Corguillé, G. Le, Ryan, D., Bretaudeau, A., Hoogstrate, Y., Pedersen, B. S., … Köster, J. (2018). Bioconda: Sustainable and comprehensive software distribution for the life sciences. Nature Methods, 15(7). https://doi.org/10.1038/s41592-018-0046-7
- [BioContainers](https://pubmed.ncbi.nlm.nih.gov/28379341/)
- > da Veiga Leprevost F, Grüning B, Aflitos SA, Röst HL, Uszkoreit J, Barsnes H, Vaudel M, Moreno P, Gatto L, Weber J, Bai M, Jimenez RC, Sachsenberg T, Pfeuffer J, Alvarez RV, Griss J, Nesvizhskii AI, Perez-Riverol Y. BioContainers: an open-source and community-driven framework for software standardization. Bioinformatics. 2017 Aug 15;33(16):2580-2582. doi: 10.1093/bioinformatics/btx192. PubMed PMID: 28379341; PubMed Central PMCID: PMC5870671.
+ > Da Veiga Leprevost, F., Grüning, B. A., Alves Aflitos, S., Röst, H. L., Uszkoreit, J., Barsnes, H., Vaudel, M., Moreno, P., Gatto, L., Weber, J., Bai, M., Jimenez, R. C., Sachsenberg, T., Pfeuffer, J., Vera Alvarez, R., Griss, J., Nesvizhskii, A. I., & Perez-Riverol, Y. (2017). BioContainers: An open-source and community-driven framework for software standardization. Bioinformatics, 33(16). https://doi.org/10.1093/bioinformatics/btx192
- [Docker](https://dl.acm.org/doi/10.5555/2600239.2600241)
@@ -38,4 +134,14 @@
- [Singularity](https://pubmed.ncbi.nlm.nih.gov/28494014/)
- > Kurtzer GM, Sochat V, Bauer MW. Singularity: Scientific containers for mobility of compute. PLoS One. 2017 May 11;12(5):e0177459. doi: 10.1371/journal.pone.0177459. eCollection 2017. PubMed PMID: 28494014; PubMed Central PMCID: PMC5426675.
+ > Kurtzer, G. M., Sochat, V., & Bauer, M. W. (2017). Singularity: Scientific containers for mobility of compute. PLoS ONE, 12(5). https://doi.org/10.1371/journal.pone.0177459
+
+## Data
+
+- [Maixner (2021)](https://doi.org/10.1016/j.cub.2021.09.031) (CI Test Data)
+
+ > Maixner, F., Sarhan, M. S., Huang, K. D., Tett, A., Schoenafinger, A., Zingale, S., Blanco-Míguez, A., Manghi, P., Cemper-Kiesslich, J., Rosendahl, W., Kusebauch, U., Morrone, S. R., Hoopmann, M. R., Rota-Stabelli, O., Rattei, T., Moritz, R. L., Oeggl, K., Segata, N., Zink, A., … Kowarik, K. (2021). Hallstatt miners consumed blue cheese and beer during the Iron Age and retained a non-Westernized gut microbiome until the Baroque period. Current Biology, 31(23). https://doi.org/10.1016/j.cub.2021.09.031
+
+- [Meslier (2022)](https://doi.org/10.1038/s41597-022-01762-z) (AWS Full Test data)
+
+ > Meslier, V., Quinquis, B., Da Silva, K., Plaza Oñate, F., Pons, N., Roume, H., Podar, M., & Almeida, M. (2022). Benchmarking second and third-generation sequencing platforms for microbial metagenomics. Scientific Data, 9(1). https://doi.org/10.1038/s41597-022-01762-z
diff --git a/README.md b/README.md
index fe76e3c0..a991a99d 100644
--- a/README.md
+++ b/README.md
@@ -1,12 +1,12 @@
 [![GitHub Actions CI Status](https://github.com/nf-core/taxprofiler/actions/workflows/ci.yml/badge.svg)](https://github.com/nf-core/taxprofiler/actions/workflows/ci.yml)
-[![GitHub Actions Linting Status](https://github.com/nf-core/taxprofiler/actions/workflows/linting.yml/badge.svg)](https://github.com/nf-core/taxprofiler/actions/workflows/linting.yml)[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/taxprofiler/results)[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.XXXXXXX-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.XXXXXXX)
+[![GitHub Actions Linting Status](https://github.com/nf-core/taxprofiler/actions/workflows/linting.yml/badge.svg)](https://github.com/nf-core/taxprofiler/actions/workflows/linting.yml)[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/taxprofiler/results)[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.7728364-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.7728364)
[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A523.04.0-23aa62.svg)](https://www.nextflow.io/)
[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)
@@ -16,53 +16,91 @@
[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23taxprofiler-4A154B?labelColor=000000&logo=slack)](https://nfcore.slack.com/channels/taxprofiler)[![Follow on Twitter](http://img.shields.io/badge/twitter-%40nf__core-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/nf_core)[![Follow on Mastodon](https://img.shields.io/badge/mastodon-nf__core-6364ff?labelColor=FFFFFF&logo=mastodon)](https://mstdn.science/@nf_core)[![Watch on YouTube](http://img.shields.io/badge/youtube-nf--core-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/c/nf-core)
-## Introduction
-
-**nf-core/taxprofiler** is a bioinformatics pipeline that ...
-
-
+[![Cite Preprint](https://img.shields.io/badge/Cite%20Us!-Cite%20Preprint-orange)](https://doi.org/10.1101/2023.10.20.563221)
-
-
+## Introduction
-1. Read QC ([`FastQC`](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/))
-2. Present QC for raw reads ([`MultiQC`](http://multiqc.info/))
+**nf-core/taxprofiler** is a bioinformatics best-practice analysis pipeline for taxonomic classification and profiling of shotgun short- and long-read metagenomic data. It allows for in-parallel taxonomic identification of reads or taxonomic abundance estimation with multiple classification and profiling tools against multiple databases, and produces standardised output tables that facilitate comparison of results between different tools and databases.
+
+## Pipeline summary
+
+![Schematic overview of the nf-core/taxprofiler workflow](docs/images/taxprofiler_tube.png)
+
+1. Read QC ([`FastQC`](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/) or [`falco`](https://github.com/smithlabcode/falco) as an alternative option)
+2. Performs optional read pre-processing
+ - Adapter clipping and merging (short-read: [fastp](https://github.com/OpenGene/fastp), [AdapterRemoval2](https://github.com/MikkelSchubert/adapterremoval); long-read: [porechop](https://github.com/rrwick/Porechop))
+ - Low complexity and quality filtering (short-read: [bbduk](https://jgi.doe.gov/data-and-tools/software-tools/bbtools/), [PRINSEQ++](https://github.com/Adrian-Cantu/PRINSEQ-plus-plus); long-read: [Filtlong](https://github.com/rrwick/Filtlong))
+ - Host-read removal (short-read: [BowTie2](http://bowtie-bio.sourceforge.net/bowtie2/); long-read: [Minimap2](https://github.com/lh3/minimap2))
+ - Run merging
+3. Generates statistics for host-read removal ([Samtools](http://www.htslib.org/))
+4. Performs taxonomic classification and/or profiling using one or more of:
+ - [Kraken2](https://ccb.jhu.edu/software/kraken2/)
+ - [MetaPhlAn](https://huttenhower.sph.harvard.edu/metaphlan/)
+ - [MALT](https://uni-tuebingen.de/fakultaeten/mathematisch-naturwissenschaftliche-fakultaet/fachbereiche/informatik/lehrstuehle/algorithms-in-bioinformatics/software/malt/)
+ - [DIAMOND](https://github.com/bbuchfink/diamond)
+ - [Centrifuge](https://ccb.jhu.edu/software/centrifuge/)
+ - [Kaiju](https://kaiju.binf.ku.dk/)
+ - [mOTUs](https://motu-tool.org/)
+ - [KrakenUniq](https://github.com/fbreitwieser/krakenuniq)
+ - [KMCP](https://github.com/shenwei356/kmcp)
+ - [ganon](https://pirovc.github.io/ganon/)
+5. Performs optional post-processing with:
+   - [bracken](https://ccb.jhu.edu/software/bracken/)
+6. Standardises output tables ([`Taxpasta`](https://taxpasta.readthedocs.io))
+7. Presents QC for raw reads ([`MultiQC`](http://multiqc.info/))
+8. Plots Kraken2, Centrifuge, Kaiju and MALT results ([`Krona`](https://hpc.nih.gov/apps/kronatools.html))
## Usage
> [!NOTE]
> If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how to set-up Nextflow. Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline) with `-profile test` before running the workflow on actual data.
-
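+First, prepare a samplesheet with your input data. Each row represents a single-end FASTQ file, a pair of FASTQ files (paired-end), or a FASTA file, together with its run accession and sequencing platform. A minimal sketch with illustrative paths (the full column set is shown in `assets/samplesheet.csv`):
+
+`samplesheet.csv`:
+
+```
+sample,run_accession,instrument_platform,fastq_1,fastq_2,fasta
+2612,ERR5766176,ILLUMINA,/<path>/<to>/fastq/ERX5474932_ERR5766176_1.fastq.gz,/<path>/<to>/fastq/ERX5474932_ERR5766176_2.fastq.gz,
+ERR3201952,ERR3201952,OXFORD_NANOPORE,/<path>/<to>/fastq/ERR3201952.fastq.gz,,
+```
+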
+Additionally, you will need a database sheet that looks as follows:
-Now, you can run the pipeline using:
+`databases.csv`:
+
+```
+tool,db_name,db_params,db_path
+kraken2,db2,--quick,/<path>/<to>/kraken2/testdb-kraken2.tar.gz
+metaphlan,db1,,/<path>/<to>/metaphlan/metaphlan_database/
+```
+
+Each `db_path` entry points to a directory or `.tar.gz` archive containing the database for a tool you wish to run the pipeline with.
-
+Now, you can run the pipeline using:
```bash
nextflow run nf-core/taxprofiler \
    -profile <docker/singularity/.../institute> \
    --input samplesheet.csv \
-   --outdir <OUTDIR>
+   --databases databases.csv \
+   --outdir <OUTDIR> \
+ --run_kraken2 --run_metaphlan
```
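+
+Each classifier or profiler only runs when its corresponding `--run_<tool>` flag is supplied and `databases.csv` contains a row for that tool, so the command above classifies with Kraken2 and MetaPhlAn against the two example databases.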
> [!WARNING]
@@ -81,9 +119,34 @@ For more details about the output files and reports, please refer to the
nf-core/taxprofiler was originally written by James A. Fellows Yates, Sofia Stamouli, Moritz E. Beber, and the nf-core/taxprofiler team.
-We thank the following people for their extensive assistance in the development of this pipeline:
+### Team
+
+- [James A. Fellows Yates](https://github.com/jfy133)
+- [Sofia Stamouli](https://github.com/sofstam)
+- [Moritz E. Beber](https://github.com/Midnighter)
+
+We thank the following people for their contributions to the development of this pipeline:
+
+- [Lauri Mesilaakso](https://github.com/ljmesi)
+- [Tanja Normark](https://github.com/talnor)
+- [Maxime Borry](https://github.com/maxibor)
+- [Thomas A. Christensen II](https://github.com/MillironX)
+- [Jianhong Ou](https://github.com/jianhong)
+- [Rafal Stepien](https://github.com/rafalstepien)
+- [Mahwash Jamy](https://github.com/mjamy)
+- [Lily Andersson Lee](https://github.com/LilyAnderssonLee)
+
+### Acknowledgments
-
+We are also grateful for the feedback and comments from:
+
+- The general [nf-core/community](https://nf-co.re/community)
+
+And specifically to
+
+- [Alex Hübner](https://github.com/alexhbnr)
+
+❤️ also goes to [Zandra Fagernäs](https://github.com/ZandraFagernas) for the logo.
## Contributions and Support
@@ -93,10 +149,11 @@ For further information or help, don't hesitate to get in touch on the [Slack `#
## Citations
-
-
+If you use nf-core/taxprofiler for your analysis, please cite it using the following doi: [10.1101/2023.10.20.563221](https://doi.org/10.1101/2023.10.20.563221).
+
+> Stamouli, S., Beber, M. E., Normark, T., Christensen II, T. A., Andersson-Li, L., Borry, M., Jamy, M., nf-core community, & Fellows Yates, J. A. (2023). nf-core/taxprofiler: Highly parallelised and flexible pipeline for metagenomic taxonomic classification and profiling. In bioRxiv (p. 2023.10.20.563221). https://doi.org/10.1101/2023.10.20.563221
-
+For the latest version of the code, cite the Zenodo doi: [10.5281/zenodo.7728364](https://doi.org/10.5281/zenodo.7728364)
An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.
diff --git a/assets/methods_description_template.yml b/assets/methods_description_template.yml
index d5119e4a..8b9a651b 100644
--- a/assets/methods_description_template.yml
+++ b/assets/methods_description_template.yml
@@ -3,8 +3,6 @@ description: "Suggested text and references to use when describing pipeline usag
section_name: "nf-core/taxprofiler Methods Description"
section_href: "https://github.com/nf-core/taxprofiler"
plot_type: "html"
-## TODO nf-core: Update the HTML below to your preferred methods description, e.g. add publication citation for this pipeline
-## You inject any metadata in the Nextflow '${workflow}' object
data: |
Methods
Data was processed using nf-core/taxprofiler v${workflow.manifest.version} ${doi_text} of the nf-core collection of workflows (Ewels et al., 2020), utilising reproducible software environments from the Bioconda (Grüning et al., 2018) and Biocontainers (da Veiga Leprevost et al., 2017) projects.
@@ -17,12 +15,13 @@ data: |
Ewels, P. A., Peltzer, A., Fillinger, S., Patel, H., Alneberg, J., Wilm, A., Garcia, M. U., Di Tommaso, P., & Nahnsen, S. (2020). The nf-core framework for community-curated bioinformatics pipelines. Nature Biotechnology, 38(3), 276-278. doi: 10.1038/s41587-020-0439-x
Grüning, B., Dale, R., Sjödin, A., Chapman, B. A., Rowe, J., Tomkins-Tinch, C. H., Valieris, R., Köster, J., & Bioconda Team. (2018). Bioconda: sustainable and comprehensive software distribution for the life sciences. Nature Methods, 15(7), 475–476. doi: 10.1038/s41592-018-0046-7
da Veiga Leprevost, F., Grüning, B. A., Alves Aflitos, S., Röst, H. L., Uszkoreit, J., Barsnes, H., Vaudel, M., Moreno, P., Gatto, L., Weber, J., Bai, M., Jimenez, R. C., Sachsenberg, T., Pfeuffer, J., Vera Alvarez, R., Griss, J., Nesvizhskii, A. I., & Perez-Riverol, Y. (2017). BioContainers: an open-source and community-driven framework for software standardization. Bioinformatics (Oxford, England), 33(16), 2580–2582. doi: 10.1093/bioinformatics/btx192
+
Stamouli, S., Beber, M. E., Normark, T., Christensen, T. A., Andersson-Li, L., Borry, M., Jamy, M., nf-core community, & Fellows Yates, J. A. (2023). nf-core/taxprofiler: Highly parallelised and flexible pipeline for metagenomic taxonomic classification and profiling. (Preprint). bioRxiv 2023.10.20.563221. doi: 10.1101/2023.10.20.563221
${tool_bibliography}
Notes:
- ${nodoi_text}
+ ${doi_text}
The command above does not include parameters contained in any configs or profiles that may have been used. Ensure the config file is also uploaded with your publication!
You should also cite all software used within this run. Check the "Software Versions" of this report to get version information.
diff --git a/assets/multiqc_config.yml b/assets/multiqc_config.yml
index 8e75884e..58909c5e 100644
--- a/assets/multiqc_config.yml
+++ b/assets/multiqc_config.yml
@@ -2,6 +2,7 @@ report_comment: >
This report has been generated by the nf-core/taxprofiler
analysis pipeline. For information about how to interpret these results, please see the
documentation.
+
report_section_order:
"nf-core-taxprofiler-methods-description":
order: -1000
@@ -12,4 +13,273 @@ report_section_order:
export_plots: true
 disable_version_detection: true
+
+custom_logo: "nf-core-taxprofiler_logo_custom_light.png"
+custom_logo_url: https://nf-co.re/taxprofiler
+custom_logo_title: "nf-core/taxprofiler"
+
+run_modules:
+ - fastqc
+ - adapterRemoval
+ - fastp
+ - bbduk
+ - prinseqplusplus
+ - porechop
+ - filtlong
+ - bowtie2
+ - minimap2
+ - samtools
+ - kraken
+ - kaiju
+ - diamond
+ - malt
+ - motus
+ - custom_content
+
+sp:
+ diamond:
+ fn_re: ".*.diamond.log$"
+ fastqc/data:
+ fn_re: ".*(fastqc|falco)_data.txt$"
+ fastqc/zip:
+ fn: "*_fastqc.zip"
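+  # falco is a drop-in FastQC replacement, so the fn_re above lets MultiQC's fastqc
+  # module pick up both fastqc_data.txt and falco_data.txt files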
+
+top_modules:
+ - "fastqc":
+ name: "FastQC / Falco (pre-Trimming)"
+ path_filters:
+ - "*raw*"
+ path_filters_exclude:
+ - "*processed*"
+ extra: "If used in this run, Falco is a drop-in replacement for FastQC producing the same output, written by Guilherme de Sena Brandine and Andrew D. Smith."
+ - "fastqc":
+ name: "FastQC / Falco (post-Trimming)"
+ path_filters:
+ - "*processed*"
+ path_filters_exclude:
+ - "*raw*"
+ extra: "If used in this run, Falco is a drop-in replacement for FastQC producing the same output, written by Guilherme de Sena Brandine and Andrew D. Smith."
+ - "fastp"
+ - "adapterRemoval"
+ - "porechop":
+      extra: "ℹ️: if you get the error message 'Error - was not able to plot data.' this means that porechop did not detect any adapters and therefore no statistics were generated."
+ - "bbduk"
+ - "prinseqplusplus"
+ - "filtlong"
+ - "bowtie2":
+ name: "bowtie2"
+ - "samtools":
+ name: "Samtools Stats"
+ - "kraken":
+ name: "Kraken"
+ path_filters:
+ - "*.kraken2.kraken2.report.txt"
+ - "kraken":
+ name: "Bracken"
+ anchor: "bracken"
+ target: "Bracken"
+ doi: "10.7717/peerj-cs.104"
+ info: "Estimates species abundances in metagenomics samples by probabilistically re-distributing reads in the taxonomic tree."
+ extra: "ℹ️: plot title will say Kraken2 due to the first step of bracken producing the same output format as Kraken. Abundance information is currently not supported in MultiQC."
+ path_filters:
+ - "*.bracken.kraken2.report.txt"
+ - "kraken":
+ name: "Centrifuge"
+ anchor: "centrifuge"
+ target: "Centrifuge"
+ doi: "10.1101/gr.210641.116"
+      info: "is a very rapid and memory-efficient system for the classification of DNA sequences from microbial samples. The system uses a novel indexing scheme based on the Burrows-Wheeler transform (BWT) and the Ferragina-Manzini (FM) index."
+ extra: "ℹ️: plot title will say Kraken2 due to Centrifuge producing the same output format as Kraken. If activated, see the actual Kraken2 results in the section above."
+ path_filters:
+ - "*.centrifuge.txt"
+ - "malt":
+ name: "MALT"
+ - "diamond"
+ - "kaiju":
+ name: "Kaiju"
+ - "motus"
+
+# It is not possible to set placement for custom kraken and centrifuge columns.
+
+table_columns_placement:
+ FastQC / Falco (pre-Trimming):
+ total_sequences: 100
+ avg_sequence_length: 110
+ median_sequence_length: 120
+ percent_duplicates: 130
+ percent_gc: 140
+ percent_fails: 150
+ FastQC / Falco (post-Trimming):
+ total_sequences: 200
+ avg_sequence_length: 210
+ median_sequence_length: 220
+ percent_duplicates: 230
+ percent_gc: 240
+ percent_fails: 250
+ fastp:
+ pct_adapter: 300
+ pct_surviving: 310
+ pct_duplication: 320
+ after_filtering_gc_content: 330
+ after_filtering_q30_rate: 340
+ after_filtering_q30_bases: 350
+ filtering_result_passed_filter_reads: 360
+ Adapter Removal:
+ aligned_total: 360
+ percent_aligned: 370
+ percent_collapsed: 380
+ percent_discarded: 390
+ Porechop:
+ Input Reads: 400
+ Start Trimmed: 410
+ Start Trimmed Percent: 420
+ End Trimmed: 430
+ End Trimmed Percent: 440
+ Middle Split: 450
+ Middle Split Percent: 460
+ Filtlong:
+ Target bases: 500
+ BBDuk:
+ Input reads: 800
+ Total Removed bases percent: 810
+ Total Removed bases: 820
+ Total Removed reads percent: 830
+ Total Removed reads: 840
+ PRINSEQ++:
+ prinseqplusplus_total: 900
+ bowtie2:
+ overall_alignment_rate: 1000
+ Samtools Stats:
+ raw_total_sequences: 1100
+ reads_mapped: 1110
+ reads_mapped_percent: 1120
+ reads_properly_paired_percent: 1130
+ non-primary_alignments: 1140
+ reads_MQ0_percent: 1150
+ error_rate: 1160
+ Bracken:
+ "% Unclassified": 1200
+ "% Top 5": 1210
+ Centrifuge:
+ "% Unclassified": 1300
+ "% Top 5": 1310
+ DIAMOND:
+ queries_aligned: 1400
+ Kaiju:
+ assigned: 1500
+ "% Assigned": 1510
+ "% Unclassified": 1520
+ Kraken:
+ "% Unclassified": 1600
+ "% Top 5": 1610
+ MALT:
+ "Num. of queries": 1700
+ Total reads: 1710
+ Mappability: 1720
+ Assig. Taxonomy: 1730
+ Taxonomic assignment success: 1740
+ motus:
+ Total number of reads: 1800
+ Number of reads after filtering: 1810
+ Total number of inserts: 1820
+ Unique mappers: 1830
+ Multiple mappers: 1840
+ Ignored multiple mapper without unique hit: 1850
+ "Number of ref-mOTUs": 1860
+ "Number of meta-mOTUs": 1870
+ "Number of ext-mOTUs": 1880
+
+table_columns_visible:
+ FastQC / Falco (pre-Trimming):
+ total_sequences: True
+ avg_sequence_length: True
+ percent_duplicates: True
+ percent_gc: True
+ percent_fails: False
+ FastQC / Falco (post-Trimming):
+ total_sequences: True
+ avg_sequence_length: True
+ percent_duplicates: False
+ percent_gc: False
+ percent_fails: False
+ porechop:
+ Input reads: False
+    Start Trimmed: False
+ Start Trimmed Percent: True
+ End Trimmed: False
+ End Trimmed Percent: True
+ Middle Split: False
+ Middle Split Percent: True
+ fastp:
+ pct_adapter: True
+ pct_surviving: True
+ pct_duplication: False
+ after_filtering_gc_content: False
+ after_filtering_q30_rate: False
+ after_filtering_q30_bases: False
+ Filtlong:
+ Target bases: True
+ Adapter Removal:
+ aligned_total: True
+ percent_aligned: True
+ percent_collapsed: True
+ percent_discarded: False
+ BBDuk:
+ Input reads: False
+    Total Removed bases percent: False
+ Total Removed bases: False
+ Total Removed reads percent: True
+ Total Removed reads: False
+ "PRINSEQ++":
+ prinseqplusplus_total: True
+ bowtie2:
+ overall_alignment_rate: True
+ Samtools Stats:
+ raw_total_sequences: True
+ reads_mapped: True
+ reads_mapped_percent: True
+ reads_properly_paired_percent: False
+ non-primary_alignments: False
+ reads_MQ0_percent: False
+ error_rate: False
+ Kraken: False
+ Bracken: False
+ Centrifuge: False
+ DIAMOND: False
+ Kaiju: False
+ MALT: False
+ motus: False
+
+table_columns_name:
+ FastQC / Falco (pre-Trimming):
+ total_sequences: "Nr. Input Reads"
+ avg_sequence_length: "Length Input Reads"
+ percent_gc: "% GC Input Reads"
+ percent_duplicates: "% Dups Input Reads"
+ percent_fails: "% Failed Input Reads"
+ FastQC / Falco (post-Trimming):
+ total_sequences: "Nr. Processed Reads"
+ avg_sequence_length: "Length Processed Reads"
+ percent_gc: "% GC Processed Reads"
+ percent_duplicates: "% Dups Processed Reads"
+ percent_fails: "% Failed Processed Reads"
+ Samtools Stats:
+ raw_total_sequences: "Nr. Reads Into Mapping"
+ reads_mapped: "Nr. Mapped Reads"
+ reads_mapped_percent: "% Mapped Reads"
+
+extra_fn_clean_exts:
+ - "kraken2.report.txt"
+ - ".txt"
+ - ".settings"
+ - ".bbduk"
+ - ".unmapped"
+ - "_filtered"
+ - type: remove
+ pattern: "_falco"
+
+section_comments:
+ general_stats: "By default, all read count columns are displayed as millions (M) of reads."
diff --git a/assets/samplesheet.csv b/assets/samplesheet.csv
index 5f653ab7..82565b15 100644
--- a/assets/samplesheet.csv
+++ b/assets/samplesheet.csv
@@ -1,3 +1,6 @@
-sample,fastq_1,fastq_2
-SAMPLE_PAIRED_END,/path/to/fastq/files/AEG588A1_S1_L002_R1_001.fastq.gz,/path/to/fastq/files/AEG588A1_S1_L002_R2_001.fastq.gz
-SAMPLE_SINGLE_END,/path/to/fastq/files/AEG588A4_S4_L003_R1_001.fastq.gz,
+sample,run_accession,instrument_platform,fastq_1,fastq_2,fasta
+2611,ERR5766174,ILLUMINA,,,/<path>/<to>/fasta/ERX5474930_ERR5766174_1.fa.gz
+2612,ERR5766176,ILLUMINA,/<path>/<to>/fastq/ERX5474932_ERR5766176_1.fastq.gz,/<path>/<to>/fastq/ERX5474932_ERR5766176_2.fastq.gz,
+2612,ERR5766180,ILLUMINA,/<path>/<to>/fastq/ERX5474936_ERR5766180_1.fastq.gz,,
+2613,ERR5766181,ILLUMINA,/<path>/<to>/fastq/ERX5474937_ERR5766181_1.fastq.gz,/<path>/<to>/fastq/ERX5474937_ERR5766181_2.fastq.gz,
+ERR3201952,ERR3201952,OXFORD_NANOPORE,/<path>/<to>/fastq/ERR3201952.fastq.gz,,
diff --git a/assets/schema_database.json b/assets/schema_database.json
new file mode 100644
index 00000000..a9a8f13a
--- /dev/null
+++ b/assets/schema_database.json
@@ -0,0 +1,79 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema",
+ "$id": "https://raw.githubusercontent.com/nf-core/taxprofiler/master/assets/schema_database.json",
+ "title": "nf-core/taxprofiler pipeline - params.database schema",
+ "description": "Schema for the file provided with params.database",
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "tool": {
+ "type": "string",
+ "exists": true,
+ "pattern": "^\\S+$",
+ "enum": [
+ "bracken",
+ "centrifuge",
+ "diamond",
+ "ganon",
+ "kaiju",
+ "kmcp",
+ "kraken2",
+ "krakenuniq",
+ "malt",
+ "metaphlan",
+ "motus"
+ ],
+      "errorMessage": "Invalid tool name. Please see documentation for all supported profilers. Currently these classifiers are included: bracken, centrifuge, diamond, ganon, kaiju, kmcp, kraken2, krakenuniq, malt, metaphlan, motus.",
+ "meta": ["tool"]
+ },
+ "db_name": {
+ "type": "string",
+ "exists": true,
+ "pattern": "^\\S+$",
+ "errorMessage": "The unique name of the database should be provided.",
+ "meta": ["db_name"]
+ },
+ "db_params": {
+ "type": "string",
+ "pattern": "^[^\"']*$",
+ "anyOf": [
+ {
+ "properties": {
+ "tool": { "const": "bracken" }
+ },
+ "not": {
+ "pattern": ".*;"
+ },
+          "errorMessage": "Invalid database db_params entry. Bracken requires a semi-colon if passing parameters."
+ },
+ {
+ "properties": {
+ "tool": { "const": "kmcp" }
+ },
+ "pattern": ".*;$",
+          "errorMessage": "Invalid database db_params entry. KMCP only requires a semi-colon if passing arguments to KMCP profile, in which case the arguments should go after the semi-colon."
+ },
+ {
+ "not": {
+ "properties": {
+ "tool": { "enum": ["bracken", "kmcp"] }
+ }
+ },
+ "errorMessage": "Invalid database db_params entry."
+ }
+ ],
+ "errorMessage": "Invalid database db_params entry. No quotes allowed.",
+ "meta": ["db_params"]
+ },
+ "db_path": {
+ "type": "string",
+ "exists": true,
+ "format": "file-path",
+ "errorMessage": "The database path could not be found."
+ }
+ },
+ "required": ["tool", "db_name", "db_path"],
+ "uniqueEntries": ["tool", "db_name"]
+ }
+}
diff --git a/assets/schema_input.json b/assets/schema_input.json
index fe4d4a00..6acc00f7 100644
--- a/assets/schema_input.json
+++ b/assets/schema_input.json
@@ -9,25 +9,49 @@
"properties": {
"sample": {
"type": "string",
- "pattern": "^\\S+$",
"errorMessage": "Sample name must be provided and cannot contain spaces",
"meta": ["id"]
},
+ "run_accession": {
+ "type": "string",
+ "errorMessage": "Run accession must be provided and cannot contain spaces."
+ },
+ "instrument_platform": {
+ "type": "string",
+ "enum": [
+ "ABI_SOLID",
+ "BGISEQ",
+ "CAPILLARY",
+ "COMPLETE_GENOMICS",
+ "DNBSEQ",
+ "HELICOS",
+ "ILLUMINA",
+ "ION_TORRENT",
+ "LS454",
+ "OXFORD_NANOPORE",
+ "PACBIO_SMRT"
+ ],
+ "errorMessage": "Sequencing platform must be provided."
+ },
"fastq_1": {
"type": "string",
"format": "file-path",
- "exists": true,
"pattern": "^\\S+\\.f(ast)?q\\.gz$",
"errorMessage": "FastQ file for reads 1 must be provided, cannot contain spaces and must have extension '.fq.gz' or '.fastq.gz'"
},
"fastq_2": {
"type": "string",
"format": "file-path",
- "exists": true,
"pattern": "^\\S+\\.f(ast)?q\\.gz$",
- "errorMessage": "FastQ file for reads 2 cannot contain spaces and must have extension '.fq.gz' or '.fastq.gz'"
+ "errorMessage": "FastQ file for reads 2 cannot contain spaces and must have extension '.fq.gz' or '.fastq.gz'. If not applicable, leave it empty."
+ },
+ "fasta": {
+ "type": "string",
+ "format": "file-path",
+ "pattern": "^\\S+\\.(f(ast)?q|fa(sta)?)\\.gz$",
+      "errorMessage": "FastA file cannot contain spaces and must have extension '.fa.gz' or '.fasta.gz'. If not applicable, leave it empty."
}
},
- "required": ["sample", "fastq_1"]
+ "required": ["sample", "run_accession", "instrument_platform"]
}
}
diff --git a/conf/base.config b/conf/base.config
index 372f0798..12ca2afa 100644
--- a/conf/base.config
+++ b/conf/base.config
@@ -10,7 +10,6 @@
process {
- // TODO nf-core: Check the defaults for all processes
cpus = { check_max( 1 * task.attempt, 'cpus' ) }
memory = { check_max( 6.GB * task.attempt, 'memory' ) }
time = { check_max( 4.h * task.attempt, 'time' ) }
@@ -24,11 +23,10 @@ process {
// These labels are used and recognised by default in DSL2 files hosted on nf-core/modules.
// If possible, it would be nice to keep the same label naming convention when
// adding in your local modules too.
- // TODO nf-core: Customise requirements for specific processes.
// See https://www.nextflow.io/docs/latest/config.html#config-process-selectors
withLabel:process_single {
cpus = { check_max( 1 , 'cpus' ) }
- memory = { check_max( 6.GB * task.attempt, 'memory' ) }
+ memory = { check_max( 1.GB * task.attempt, 'memory' ) }
time = { check_max( 4.h * task.attempt, 'time' ) }
}
withLabel:process_low {
@@ -62,4 +60,37 @@ process {
withName:CUSTOM_DUMPSOFTWAREVERSIONS {
cache = false
}
+
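+    // Tolerate Bracken failures (e.g. samples with too few classified reads for
+    // abundance re-estimation) rather than failing the whole run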
+ withName: BRACKEN_BRACKEN {
+ errorStrategy = 'ignore'
+ }
+
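+    // centrifuge-kreport exits 255 in some edge cases (e.g. no classified reads);
+    // treat that as ignorable and retry any other failure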
+ withName: CENTRIFUGE_KREPORT {
+        errorStrategy = { task.exitStatus == 255 ? 'ignore' : 'retry' }
+ }
+
+ withName: KRAKENTOOLS_COMBINEKREPORTS_CENTRIFUGE {
+ errorStrategy = { task.exitStatus in [255,1] ? 'ignore' : 'retry' }
+ }
+
+ withName: MEGAN_RMA2INFO_TSV {
+ cpus = { check_max( 1 , 'cpus' ) }
+ memory = { check_max( 6.GB * task.attempt, 'memory' ) }
+ time = { check_max( 4.h * task.attempt, 'time' ) }
+ }
+ withName: MEGAN_RMA2INFO_KRONA {
+ cpus = { check_max( 1 , 'cpus' ) }
+ memory = { check_max( 6.GB * task.attempt, 'memory' ) }
+ time = { check_max( 4.h * task.attempt, 'time' ) }
+ }
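+    // FALCO is multi-threaded and needs more memory than the single-CPU default (raised to 4GB in PR #334)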
+ withName: FALCO {
+ cpus = { check_max( 6 , 'cpus' ) }
+ memory = { check_max( 4.GB * task.attempt, 'memory' ) }
+ time = { check_max( 4.h * task.attempt, 'time' ) }
+ }
}
diff --git a/conf/modules.config b/conf/modules.config
index e3ea8fa6..d7488250 100644
--- a/conf/modules.config
+++ b/conf/modules.config
@@ -12,14 +12,763 @@
process {
- publishDir = [
- path: { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" },
- mode: params.publish_dir_mode,
- saveAs: { filename -> filename.equals('versions.yml') ? null : filename }
+ withName: FASTQC {
+ ext.args = '--quiet'
+ ext.prefix = { "${meta.id}_${meta.run_accession}_raw" }
+ publishDir = [
+ path: { "${params.outdir}/fastqc/raw" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{html,zip}'
+ ]
+ }
+
+ withName: FASTQC_PROCESSED {
+ ext.args = '--quiet'
+ ext.prefix = { "${meta.id}_${meta.run_accession}_processed" }
+ publishDir = [
+ path: { "${params.outdir}/fastqc/processed" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{html,zip}'
+ ]
+ }
+
+ withName: FALCO {
+ ext.prefix = { "${meta.id}_${meta.run_accession}_raw_falco" }
+ publishDir = [
+ path: { "${params.outdir}/falco/raw" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{html,txt,zip}'
+ ]
+ }
+
+ withName: FALCO_PROCESSED {
+ ext.prefix = { "${meta.id}_${meta.run_accession}_processed_falco" }
+ publishDir = [
+ path: { "${params.outdir}/falco/processed" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{html,txt,zip}'
+ ]
+ }
+
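+    // Publishing pattern for the preprocessing modules below: tool outputs go to the
+    // tool's own directory (FASTQs only with --save_preprocessed_reads), and FASTQs are
+    // additionally copied to analysis_ready_fastqs when the module produces the final
+    // reads of the run (no later run merging, host removal, or complexity filtering)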
+ withName: FASTP_SINGLE {
+ ext.args = [
+ // trimming options
+ params.shortread_qc_skipadaptertrim ? "--disable_adapter_trimming" : "",
+ params.shortread_qc_adapterlist ? "" : params.shortread_qc_adapter1 ? "--adapter_sequence ${params.shortread_qc_adapter1}" : "",
+ // filtering options
+ "--length_required ${params.shortread_qc_minlength}",
+ (params.perform_shortread_complexityfilter && params.shortread_complexityfilter_tool == 'fastp') ? "--low_complexity_filter --complexity_threshold ${params.shortread_complexityfilter_fastp_threshold}" : '',
+ params.shortread_qc_dedup ? "--dedup" : ""
+ ].join(' ').trim()
+ ext.prefix = { "${meta.id}_${meta.run_accession}" }
+ publishDir = [
+ [
+ path: { "${params.outdir}/fastp" },
+ mode: params.publish_dir_mode,
+ pattern: '*.fastq.gz',
+ enabled: params.save_preprocessed_reads
+ ],
+ [
+ path: { "${params.outdir}/fastp" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{log,html,json}'
+ ],
+ [
+ path: { "${params.outdir}/analysis_ready_fastqs" },
+ mode: params.publish_dir_mode,
+ pattern: '*.fastq.gz',
+ enabled: params.save_analysis_ready_fastqs,
+            // NOTE: `== false` is used rather than `!` because plain negation did not evaluate as expected here
+ saveAs: { ( params.perform_runmerging == false || ( params.perform_runmerging && !meta.is_multirun ) ) && !params.perform_shortread_hostremoval && !params.perform_shortread_complexityfilter && params.perform_shortread_qc && params.save_analysis_ready_fastqs ? it : null }
+ ]
+ ]
+ }
+
+ withName: FASTP_PAIRED {
+ ext.args = [
+ // collapsing options - option to retain singletons
+ params.shortread_qc_includeunmerged ? '--include_unmerged' : '',
+ // trimming options
+ params.shortread_qc_skipadaptertrim ? "--disable_adapter_trimming" : "",
+ params.shortread_qc_adapterlist ? "" : params.shortread_qc_adapter1 ? "--adapter_sequence ${params.shortread_qc_adapter1}" : "",
+ params.shortread_qc_adapterlist ? "" : params.shortread_qc_adapter2 ? "--adapter_sequence_r2 ${params.shortread_qc_adapter2}" : "--detect_adapter_for_pe",
+ // filtering options
+ "--length_required ${params.shortread_qc_minlength}",
+ params.perform_shortread_complexityfilter && params.shortread_complexityfilter_tool == 'fastp' ? "--low_complexity_filter --complexity_threshold ${params.shortread_complexityfilter_fastp_threshold}" : '',
+ params.shortread_qc_dedup ? "--dedup" : ""
+ ].join(' ').trim()
+ ext.prefix = { "${meta.id}_${meta.run_accession}" }
+ publishDir = [
+ [
+ path: { "${params.outdir}/fastp" },
+ mode: params.publish_dir_mode,
+ pattern: '*.fastq.gz',
+ enabled: params.save_preprocessed_reads
+ ],
+ [
+ path: { "${params.outdir}/fastp" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{log,html,json}'
+ ],
+ [
+ path: { "${params.outdir}/analysis_ready_fastqs" },
+ mode: params.publish_dir_mode,
+ pattern: params.shortread_qc_mergepairs ? '*merged.fastq.gz' : '*.fastp.fastq.gz',
+ enabled: params.save_analysis_ready_fastqs,
+ saveAs: { ( params.perform_runmerging == false || ( params.perform_runmerging && !meta.is_multirun ) ) && !params.perform_shortread_hostremoval && !params.perform_shortread_complexityfilter && params.perform_shortread_qc && params.save_analysis_ready_fastqs ? it : null }
+ ]
+ ]
+ }
+
+ withName: ADAPTERREMOVAL_SINGLE {
+ ext.args = [
+ // trimming options
+ params.shortread_qc_skipadaptertrim ? "--adapter1 ''" : params.shortread_qc_adapterlist ? "" : params.shortread_qc_adapter1 ? "--adapter1 ${params.shortread_qc_adapter1}" : "",
+ // filtering options
+ "--minlength ${params.shortread_qc_minlength}"
+ ].join(' ').trim()
+ ext.prefix = { "${meta.id}_${meta.run_accession}" }
+ publishDir = [
+ [
+ path: { "${params.outdir}/adapterremoval" },
+ mode: params.publish_dir_mode,
+ pattern: '*.fastq.gz',
+ enabled: params.save_preprocessed_reads
+ ],
+ [
+ path: { "${params.outdir}/adapterremoval" },
+ mode: params.publish_dir_mode,
+ pattern: '*.settings'
+ ],
+ [
+ path: { "${params.outdir}/analysis_ready_fastqs" },
+ mode: params.publish_dir_mode,
+ pattern: '*truncated.fastq.gz',
+ enabled: params.save_analysis_ready_fastqs,
+ saveAs: { ( params.perform_runmerging == false || ( params.perform_runmerging && !meta.is_multirun ) ) && !params.perform_shortread_hostremoval && !params.perform_shortread_complexityfilter && params.perform_shortread_qc && params.save_analysis_ready_fastqs ? it : null }
+ ]
+ ]
+ }
+
+ withName: ADAPTERREMOVAL_PAIRED {
+ ext.args = [
+ // collapsing options
+ params.shortread_qc_mergepairs ? "--collapse" : "",
+ // trimming options
+ params.shortread_qc_skipadaptertrim ? "--adapter1 ''" : params.shortread_qc_adapterlist ? "" : params.shortread_qc_adapter1 ? "--adapter1 ${params.shortread_qc_adapter1}" : "", // adding adapter list happens at module input channel level
+ params.shortread_qc_skipadaptertrim ? "--adapter2 ''" : params.shortread_qc_adapterlist ? "" : params.shortread_qc_adapter2 ? "--adapter2 ${params.shortread_qc_adapter2}" : "",
+ // filtering options
+ "--minlength ${params.shortread_qc_minlength}"
+ ].join(' ').trim()
+ ext.prefix = { "${meta.id}_${meta.run_accession}" }
+ publishDir = [
+ [
+ path: { "${params.outdir}/adapterremoval" },
+ mode: params.publish_dir_mode,
+ pattern: '*.fastq.gz',
+ enabled: params.save_preprocessed_reads
+ ],
+ [
+ path: { "${params.outdir}/adapterremoval" },
+ mode: params.publish_dir_mode,
+ pattern: '*.settings'
+ ],
+ [
+ path: { "${params.outdir}/analysis_ready_fastqs" },
+ mode: params.publish_dir_mode,
+ pattern: '*{truncated.fastq,singleton.truncated}.gz',
+ enabled: params.save_analysis_ready_fastqs,
+ saveAs: { ( params.perform_runmerging == false || ( params.perform_runmerging && !meta.is_multirun ) ) && !params.perform_shortread_hostremoval && !params.perform_shortread_complexityfilter && params.perform_shortread_qc && !params.shortread_qc_mergepairs && params.save_analysis_ready_fastqs ? it : null}
+ ]
+ ]
+ }
+
+ // AdapterRemoval separate output merging
+ withName: CAT_FASTQ {
+ ext.prefix = { "${meta.id}_${meta.run_accession}" }
+ publishDir = [
+ [
+ path: { "${params.outdir}/analysis_ready_fastqs" },
+ mode: params.publish_dir_mode,
+ pattern: '*.fastq.gz',
+ enabled: params.save_analysis_ready_fastqs,
+ saveAs: { ( params.perform_runmerging == false || ( params.perform_runmerging && !meta.is_multirun ) ) && !params.perform_shortread_hostremoval && !params.perform_shortread_complexityfilter && params.perform_shortread_qc && params.save_analysis_ready_fastqs ? it : null }
+ ]
+ ]
+ }
+
+ withName: PORECHOP_PORECHOP {
+ ext.prefix = { "${meta.id}_${meta.run_accession}" }
+ publishDir = [
+ [
+ path: { "${params.outdir}/porechop" },
+ mode: params.publish_dir_mode,
+ pattern: '*_porechopped.fastq.gz',
+ enabled: params.save_preprocessed_reads
+ ],
+ [
+ path: { "${params.outdir}/porechop" },
+ mode: params.publish_dir_mode,
+ pattern: '*.log'
+ ],
+ [
+ path: { "${params.outdir}/analysis_ready_fastqs" },
+ mode: params.publish_dir_mode,
+ pattern: '*_porechopped.fastq.gz',
+ enabled: params.save_analysis_ready_fastqs,
+ saveAs: { ( params.perform_runmerging == false || ( params.perform_runmerging && !meta.is_multirun ) ) && !params.perform_longread_hostremoval && params.longread_qc_skipqualityfilter && !params.longread_qc_skipadaptertrim && params.perform_longread_qc && params.save_analysis_ready_fastqs ? it : null }
+ ]
+ ]
+ }
+
+ withName: FILTLONG {
+ ext.args = [
+ "--min_length ${params.longread_qc_qualityfilter_minlength}",
+ "--keep_percent ${params.longread_qc_qualityfilter_keeppercent}",
+ "--target_bases ${params.longread_qc_qualityfilter_targetbases}"
+ ]
+ .join(' ').trim()
+ ext.prefix = { "${meta.id}_${meta.run_accession}_filtered" }
+ publishDir = [
+ [
+ path: { "${params.outdir}/filtlong" },
+ mode: params.publish_dir_mode,
+ pattern: '*.fastq.gz',
+ enabled: params.save_preprocessed_reads
+ ],
+ [
+ path: { "${params.outdir}/filtlong" },
+ mode: params.publish_dir_mode,
+ pattern: '*.log'
+ ],
+ [
+ path: { "${params.outdir}/analysis_ready_fastqs" },
+ mode: params.publish_dir_mode,
+ pattern: '*.fastq.gz',
+ enabled: params.save_analysis_ready_fastqs,
+ saveAs: { ( params.perform_runmerging == false || ( params.perform_runmerging && !meta.is_multirun ) ) && !params.perform_longread_hostremoval && !params.longread_qc_skipqualityfilter && params.perform_longread_qc && params.save_analysis_ready_fastqs ? it : null }
+ ]
+ ]
+ }
+
+ withName: BBMAP_BBDUK {
+ ext.args = [
+ "entropy=${params.shortread_complexityfilter_entropy}",
+ "entropywindow=${params.shortread_complexityfilter_bbduk_windowsize}",
+ params.shortread_complexityfilter_bbduk_mask ? "entropymask=t" : "entropymask=f"
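+            // entropymask=t masks low-complexity stretches with Ns rather than discarding whole reads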
+ ].join(' ').trim()
+ ext.prefix = { "${meta.id}_${meta.run_accession}" }
+ publishDir = [
+ [
+ path: { "${params.outdir}/bbduk/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{fastq.gz}',
+ enabled: params.save_complexityfiltered_reads
+ ],
+ [
+ path: { "${params.outdir}/bbduk/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.log'
+ ],
+ [
+ path: { "${params.outdir}/analysis_ready_fastqs" },
+ mode: params.publish_dir_mode,
+ pattern: '*.fastq.gz',
+ enabled: params.save_analysis_ready_fastqs,
+ saveAs: { ( params.perform_runmerging == false || ( params.perform_runmerging && !meta.is_multirun ) ) && !params.perform_shortread_hostremoval && params.shortread_complexityfilter_tool && params.save_analysis_ready_fastqs ? it : null }
+ ]
+ ]
+ }
+
+ withName: PRINSEQPLUSPLUS {
+ ext.args = [
+ params.shortread_complexityfilter_prinseqplusplus_mode == 'dust' ? "-lc_dust=${params.shortread_complexityfilter_prinseqplusplus_dustscore}" : "-lc_entropy=${params.shortread_complexityfilter_entropy}",
+ "-trim_qual_left=0 -trim_qual_left=0 -trim_qual_window=0 -trim_qual_step=0",
+ ].join(' ').trim()
+ ext.prefix = { "${meta.id}_${meta.run_accession}" }
+ publishDir = [
+ [
+ path: { "${params.outdir}/prinseqplusplus/" },
+ mode: params.publish_dir_mode,
+ pattern: '*{_good_out.fastq.gz,_good_out_R1.fastq.gz,_good_out_R2.fastq.gz}',
+ enabled: params.save_complexityfiltered_reads
+ ],
+ [
+ path: { "${params.outdir}/prinseqplusplus/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.log'
+ ],
+ [
+ path: { "${params.outdir}/analysis_ready_fastqs" },
+ mode: params.publish_dir_mode,
+ pattern: '*{_good_out.fastq.gz,_good_out_R1.fastq.gz,_good_out_R2.fastq.gz}',
+ enabled: params.save_analysis_ready_fastqs,
+ saveAs: { ( params.perform_runmerging == false || ( params.perform_runmerging && !meta.is_multirun ) ) && !params.perform_shortread_hostremoval && params.shortread_complexityfilter_tool && params.save_analysis_ready_fastqs ? it : null }
+ ]
+ ]
+ }
+
+ withName: BOWTIE2_BUILD {
+ publishDir = [
+ [
+ path: { "${params.outdir}/bowtie2/build" },
+ mode: params.publish_dir_mode,
+ pattern: 'bowtie2',
+ enabled: params.save_hostremoval_index
+ ]
+ ]
+ }
+
+ // Saving unmapped reads as FQ comes via input channel!
+ withName: BOWTIE2_ALIGN {
+ ext.prefix = { "${meta.id}_${meta.run_accession}" }
+ publishDir = [
+ [
+ path: { "${params.outdir}/bowtie2/align" },
+ mode: params.publish_dir_mode,
+ pattern: '*.log'
+ ],
+ [
+ path: { "${params.outdir}/bowtie2/align" },
+ mode: params.publish_dir_mode,
+ pattern: '*.bam',
+ enabled: params.save_hostremoval_bam
+ ],
+ [
+ path: { "${params.outdir}/bowtie2/align" },
+ mode: params.publish_dir_mode,
+ pattern: '*.fastq.gz',
+ enabled: params.save_hostremoval_unmapped
+ ],
+ [
+ path: { "${params.outdir}/analysis_ready_fastqs" },
+ mode: params.publish_dir_mode,
+                pattern: '*.fastq.gz',
+                enabled: params.perform_shortread_hostremoval && params.save_analysis_ready_fastqs,
+ saveAs: { ( params.perform_runmerging == false || ( params.perform_runmerging && !meta.is_multirun ) ) && params.perform_shortread_hostremoval && params.save_analysis_ready_fastqs ? it : null }
+ ]
+ ]
+ }
+
+ withName: MINIMAP2_INDEX {
+ ext.args = '-x map-ont'
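+        // map-ont: minimap2 preset tuned for noisy Oxford Nanopore reads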
+ publishDir = [
+ path: { "${params.outdir}/minimap2/index" },
+ mode: params.publish_dir_mode,
+ pattern: '*.mmi',
+ enabled: params.save_hostremoval_index
+ ]
+ }
+
+ withName: MINIMAP2_ALIGN {
+ ext.prefix = { "${meta.id}_${meta.run_accession}" }
+ publishDir = [
+ path: { "${params.outdir}/minimap2/align" },
+ mode: params.publish_dir_mode,
+ pattern: '*.bam',
+ enabled: params.save_hostremoval_bam
+ ]
+ }
+
+ withName: SAMTOOLS_VIEW {
+ ext.args = '-f 4'
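+        // -f 4: keep only records with the 'read unmapped' SAM flag set, i.e. reads that did not map to the host reference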
+ ext.prefix = { "${meta.id}_${meta.run_accession}.unmapped" }
+ }
+
+ withName: SAMTOOLS_FASTQ {
+ ext.prefix = { "${meta.id}_${meta.run_accession}.unmapped" }
+ publishDir = [
+ [
+ path: { "${params.outdir}/samtools/fastq" },
+ mode: params.publish_dir_mode,
+ pattern: '*_other.fastq.gz',
+ enabled: params.save_hostremoval_unmapped
+ ],
+ [
+ path: { "${params.outdir}/analysis_ready_fastqs" },
+ mode: params.publish_dir_mode,
+ pattern: '*.fq.gz',
+ enabled: params.save_analysis_ready_fastqs,
+ saveAs: { ( params.perform_runmerging == false || ( params.perform_runmerging && !meta.is_multirun) ) && params.perform_longread_hostremoval && params.save_analysis_ready_fastqs ? it : null }
+ ]
+ ]
+ }
+
+ withName: SAMTOOLS_STATS {
+ ext.prefix = { "${meta.id}_${meta.run_accession}" }
+ publishDir = [
+ path: { "${params.outdir}/samtools/stats" },
+ mode: params.publish_dir_mode,
+ pattern: '*stats'
+ ]
+ }
+
+ withName: MERGE_RUNS {
+ ext.prefix = { "${meta.id}" }
+ publishDir = [
+ [
+ path: { "${params.outdir}/run_merging/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.fastq.gz',
+ enabled: params.save_runmerged_reads
+ ],
+ [
+ path: { "${params.outdir}/analysis_ready_fastqs" },
+ mode: params.publish_dir_mode,
+ pattern: '*.fastq.gz',
+ enabled: params.perform_runmerging && params.save_analysis_ready_fastqs
+ ]
+ ]
+ }
+
+ withName: MALT_RUN {
+ tag = {"${meta.db_name}|${meta.id}"}
+ ext.args = { "${meta.db_params} -m ${params.malt_mode}" }
+ // one run with multiple samples, so fix ID to just db name to ensure clean log name
+ ext.prefix = { "${meta.db_name}" }
+ publishDir = [
+ path: { "${params.outdir}/malt/${meta.db_name}/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{rma6,log,sam}'
+ ]
+ }
+
+ withName: 'MEGAN_RMA2INFO_TSV' {
+ tag = {"${meta.db_name}|${meta.id}"}
+ ext.args = "-c2c Taxonomy"
+ ext.prefix = { "${meta.id}" }
+ publishDir = [
+ path: { "${params.outdir}/malt/${meta.db_name}/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{txt.gz,megan}'
+ ]
+ }
+
+ withName: KRAKEN2_KRAKEN2 {
+ tag = { "${meta.db_name}|${meta.tool}|${meta.id}" }
+ ext.args = params.kraken2_save_minimizers ? { "${meta.db_params} --report-minimizer-data" } : { "${meta.db_params}" }
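+        // with run merging there is no single run accession, so output names drop it; the suffix marks whether kraken2 runs standalone or to feed bracken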
+ ext.prefix = params.perform_runmerging ? { meta.tool == "bracken" ? "${meta.id}_${meta.db_name}.bracken" : "${meta.id}_${meta.db_name}.kraken2" } : { meta.tool == "bracken" ? "${meta.id}_${meta.run_accession}_${meta.db_name}.bracken" : "${meta.id}_${meta.run_accession}_${meta.db_name}.kraken2" }
+ publishDir = [
+ path: { "${params.outdir}/kraken2/${meta.db_name}/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{txt,fastq.gz}'
+ ]
+ }
+
+ withName: KRAKEN2_STANDARD_REPORT {
+ tag = { "${meta.db_name}|${meta.tool}|${meta.id}" }
+ ext.prefix = params.perform_runmerging ? { meta.tool == "bracken" ? "${meta.id}_${meta.db_name}.bracken" : "${meta.id}_${meta.db_name}.kraken2" } : { meta.tool == "bracken" ? "${meta.id}_${meta.run_accession}_${meta.db_name}.bracken" : "${meta.id}_${meta.run_accession}_${meta.db_name}.kraken2" }
+ publishDir = [
+ path: { "${params.outdir}/kraken2/${meta.db_name}/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.report.txt'
+ ]
+ }
+
+ withName: BRACKEN_BRACKEN {
+ tag = {"${meta.db_name}|${meta.id}"}
+ ext.args = { "${meta.db_params}" }
+ ext.prefix = params.perform_runmerging ? { "${meta.id}_${meta.db_name}.bracken" } : { "${meta.id}_${meta.run_accession}_${meta.db_name}.bracken" }
+ publishDir = [
+ path: { "${params.outdir}/bracken/${meta.db_name}/" },
+ mode: params.publish_dir_mode,
+ pattern: '*{.tsv,.txt}'
]
+ }
+<<<<<<< HEAD
withName: FASTQC {
ext.args = '--quiet'
+=======
+ withName: BRACKEN_COMBINEBRACKENOUTPUTS {
+ ext.prefix = { "bracken_${meta.id}_combined_reports" }
+ publishDir = [
+ path: { "${params.outdir}/bracken/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.txt'
+ ]
+ }
+
+ withName: KRAKENTOOLS_COMBINEKREPORTS_KRAKEN {
+ ext.prefix = { "kraken2_${meta.db_name}_combined_reports" }
+ publishDir = [
+ path: { "${params.outdir}/kraken2/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.txt'
+ ]
+ }
+
+ withName: KRAKENUNIQ_PRELOADEDKRAKENUNIQ {
+ ext.args = { "${meta.db_params}" }
+ // one run with multiple samples, so fix ID to just db name to ensure clean log name
+ ext.prefix = { "${meta.db_name}.krakenuniq" }
+ publishDir = [
+ path: { "${params.outdir}/krakenuniq/${meta.db_name}/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{txt,fasta.gz}'
+ ]
+ }
+
+ withName: KRONA_CLEANUP {
+ ext.prefix = params.perform_runmerging ? { "${meta.id}_${meta.db_name}" } : { "${meta.id}_${meta.run_accession}_${meta.db_name}" }
+ publishDir = [
+ path: { "${params.outdir}/krona/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{html}'
+ ]
+ }
+
+ withName: KRONA_KTIMPORTTEXT {
+ ext.prefix = { "${meta.tool}_${meta.id}" }
+ publishDir = [
+ path: { "${params.outdir}/krona/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{html}'
+ ]
+ }
+
+ withName: 'MEGAN_RMA2INFO_KRONA' {
+ tag = {"${meta.db_name}|${meta.id}"}
+ ext.args = { "--read2class Taxonomy" }
+ ext.prefix = { "${meta.id}_${meta.db_name}" }
+ }
+
+ withName: KRONA_KTIMPORTTAXONOMY {
+ ext.args = "-i"
+ ext.prefix = { "${meta.tool}_${meta.id}" }
+ publishDir = [
+ path: { "${params.outdir}/krona/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{html}'
+ ]
+ }
+
+ withName: METAPHLAN_METAPHLAN {
+ tag = {"${meta.db_name}|${meta.id}"}
+ ext.args = { "${meta.db_params}" }
+ ext.prefix = params.perform_runmerging ? { "${meta.id}_${meta.db_name}.metaphlan" } : { "${meta.id}_${meta.run_accession}_${meta.db_name}.metaphlan" }
+ publishDir = [
+ path: { "${params.outdir}/metaphlan/${meta.db_name}/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{biom,txt}'
+ ]
+ }
+
+ withName: METAPHLAN_MERGEMETAPHLANTABLES {
+ ext.prefix = { "metaphlan_${meta.id}_combined_reports" }
+ publishDir = [
+ path: { "${params.outdir}/metaphlan/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{txt}'
+ ]
+ }
+
+ withName: CENTRIFUGE_CENTRIFUGE {
+ tag = {"${meta.db_name}|${meta.id}"}
+ ext.args = { "${meta.db_params}" }
+ ext.prefix = params.perform_runmerging ? { "${meta.id}_${meta.db_name}.centrifuge" } : { "${meta.id}_${meta.run_accession}_${meta.db_name}.centrifuge" }
+ publishDir = [
+ path: { "${params.outdir}/centrifuge/${meta.db_name}/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{txt,sam,tab,gz}'
+ ]
+ }
+
+ withName: CENTRIFUGE_KREPORT {
+ tag = {"${meta.db_name}|${meta.id}"}
+ ext.args = { "${meta.db_params}" }
+ ext.prefix = params.perform_runmerging ? { "${meta.id}_${meta.db_name}.centrifuge" } : { "${meta.id}_${meta.run_accession}_${meta.db_name}.centrifuge" }
+ publishDir = [
+ path: { "${params.outdir}/centrifuge/${meta.db_name}/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{txt}'
+ ]
+ }
+
+ withName: KRAKENTOOLS_COMBINEKREPORTS_CENTRIFUGE {
+ ext.prefix = { "centrifuge_${meta.id}_combined_reports" }
+ publishDir = [
+ path: { "${params.outdir}/centrifuge/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{txt}'
+ ]
+ }
+
+ withName: KAIJU_KAIJU {
+ tag = {"${meta.db_name}|${meta.id}"}
+ ext.args = { "${meta.db_params}" }
+ ext.prefix = params.perform_runmerging ? { "${meta.id}_${meta.db_name}.kaiju" } : { "${meta.id}_${meta.run_accession}_${meta.db_name}.kaiju" }
+ publishDir = [
+ path: { "${params.outdir}/kaiju/${meta.db_name}/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.tsv'
+ ]
+ }
+
+ withName: 'KAIJU_KAIJU2TABLE_SINGLE' {
+ tag = {"${meta.db_name}|${meta.id}"}
+ ext.args = {[
+ params.kaiju_expand_viruses ? "-e" : ""
+ ].join(' ').trim() }
+ ext.prefix = params.perform_runmerging ? { "${meta.id}_${meta.db_name}.kaijutable" } : { "${meta.id}_${meta.run_accession}_${meta.db_name}.kaijutable" }
+ publishDir = [
+ path: { "${params.outdir}/kaiju/${meta.db_name}/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{txt}'
+ ]
+ }
+
+ withName: 'KAIJU_KAIJU2TABLE_COMBINED' {
+ ext.prefix = { "kaiju_${meta.id}_combined_reports" }
+ publishDir = [
+ path: { "${params.outdir}/kaiju/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{txt}'
+ ]
+ }
+
+ withName: KAIJU_KAIJU2KRONA {
+ tag = {"${meta.db_name}|${meta.id}"}
+ ext.args = '-v -u'
+ }
+
+ withName: DIAMOND_BLASTX {
+ tag = {"${meta.db_name}|${meta.id}"}
+ ext.args = { "${meta.db_params}" }
+ ext.prefix = params.perform_runmerging ? { "${meta.id}_${meta.db_name}.diamond" } : { "${meta.id}_${meta.run_accession}_${meta.db_name}.diamond" }
+ publishDir = [
+ path: { "${params.outdir}/diamond/${meta.db_name}/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{blast,xml,txt,daa,sam,tsv,paf,log}'
+ ]
+ }
+
+ withName: MOTUS_PROFILE {
+ tag = {"${meta.db_name}|${meta.id}"}
+ ext.args = {
+ [
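+                // assumed mOTUs flag semantics: -p prints NCBI taxonomy identifiers, -c reports read counts instead of relative abundances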
+ params.motus_remove_ncbi_ids ? "" : "-p",
+ params.motus_use_relative_abundance ? "" : "-c",
+ params.motus_save_mgc_read_counts ? "-M ${task.ext.prefix}.mgc" : ""
+            ].join(' ').trim()
+ }
+ ext.prefix = params.perform_runmerging ? { "${meta.id}_${meta.db_name}" } : { "${meta.id}_${meta.run_accession}_${meta.db_name}" }
+ publishDir = [
+ path: { "${params.outdir}/motus/${meta.db_name}/" },
+ mode: params.publish_dir_mode
+ ]
+ }
+
+ withName: MOTUS_MERGE {
+ ext.args = { params.standardisation_motus_generatebiom ? "-B" : "" }
+ ext.prefix = { "motus_${meta.id}_combined_reports" }
+ publishDir = [
+ path: { "${params.outdir}/motus/" },
+ mode: params.publish_dir_mode
+ ]
+ }
+
+ withName: KMCP_SEARCH {
+ tag = {"${meta.db_name}|${meta.id}"}
+ ext.args = { "${meta.db_params}" }
+ ext.prefix = params.perform_runmerging ? { "${meta.id}_${meta.db_name}.kmcp_search" } : { "${meta.id}_${meta.run_accession}_${meta.db_name}.kmcp_search" }
+ publishDir = [
+ path: { "${params.outdir}/kmcp/${meta.db_name}/" },
+ mode: params.publish_dir_mode,
+ enabled: params.kmcp_save_search
+ ]
+ }
+
+ withName: KMCP_PROFILE {
+ tag = {"${meta.db_name}|${meta.id}"}
+ ext.args = { "${meta.db_params}" }
+ ext.prefix = params.perform_runmerging ? { "${meta.id}_${meta.db_name}.kmcp" } : { "${meta.id}_${meta.run_accession}_${meta.db_name}.kmcp" }
+ publishDir = [
+ path: { "${params.outdir}/kmcp/${meta.db_name}/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{profile}'
+ ]
+ }
+
+ withName: GANON_CLASSIFY {
+ tag = {"${meta.db_name}|${meta.id}"}
+ ext.args = params.ganon_save_readclassifications ? { "${meta.db_params} --output-all --output-lca --output-unclassified" } : { "${meta.db_params}" }
+ ext.prefix = params.perform_runmerging ? { "${meta.id}_${meta.db_name}.ganon" } : { "${meta.id}_${meta.run_accession}_${meta.db_name}.ganon" }
+ publishDir = [
+ path: { "${params.outdir}/ganon/${meta.db_name}/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{tre,rep,lca,all,unc}'
+ ]
+ }
+
+ withName: GANON_REPORT {
+ tag = {"${meta.db_name}|${meta.id}"}
+ ext.args = {[
+ "--report-type ${params.ganon_report_type}",
+            params.ganon_report_rank != 'default' ? "--ranks ${params.ganon_report_rank}" : "",
+ "--top-percentile ${params.ganon_report_toppercentile}",
+ "--min-count ${params.ganon_report_mincount}",
+ "--max-count ${params.ganon_report_maxcount}"
+ ].join(' ').trim() }
+ ext.prefix = params.perform_runmerging ? { "${meta.id}_${meta.db_name}.ganon_report" } : { "${meta.id}_${meta.run_accession}_${meta.db_name}.ganon_report" }
+ publishDir = [
+ path: { "${params.outdir}/ganon/${meta.db_name}/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{tre}'
+ ]
+ }
+
+ withName: GANON_TABLE {
+ ext.prefix = { "ganon_${meta.id}_combined_reports" }
+ publishDir = [
+ path: { "${params.outdir}/ganon/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.txt'
+ ]
+ }
+
+ withName: TAXPASTA_MERGE {
+ tag = { "${meta.tool}|${meta.id}" }
+ ext.args = {
+ [
+ "-p ${meta.tool} -o ${meta.tool}_${meta.id}.${params.standardisation_taxpasta_format}",
+ params.taxpasta_add_name ? "--add-name" : "",
+ params.taxpasta_add_rank ? "--add-rank" : "",
+ params.taxpasta_add_lineage ? "--add-lineage" : "",
+ params.taxpasta_add_idlineage ? "--add-id-lineage" : "",
+ params.taxpasta_add_ranklineage ? "--add-rank-lineage" : "",
+ params.taxpasta_ignore_errors ? "--ignore-errors" : ""
+ ].join(' ').trim()
+ }
+ publishDir = [
+ path: { "${params.outdir}/taxpasta/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{tsv,csv,arrow,parquet,biom}'
+ ]
+ }
+
+ withName: TAXPASTA_STANDARDISE {
+ tag = { "${meta.tool}|${meta.id}" }
+ ext.args = {
+ [
+ "-p ${meta.tool} -o ${meta.tool}_${meta.id}.${params.standardisation_taxpasta_format}",
+ params.taxpasta_add_name ? "--add-name" : "",
+ params.taxpasta_add_rank ? "--add-rank" : "",
+ params.taxpasta_add_lineage ? "--add-lineage" : "",
+ params.taxpasta_add_idlineage ? "--add-id-lineage" : ""
+ ].join(' ').trim()
+ }
+ publishDir = [
+ path: { "${params.outdir}/taxpasta/" },
+ mode: params.publish_dir_mode,
+ pattern: '*.{tsv,csv,arrow,parquet,biom}'
+ ]
+>>>>>>> dev
}
withName: CUSTOM_DUMPSOFTWAREVERSIONS {
diff --git a/conf/test.config b/conf/test.config
index 42772cfe..c11f27b6 100644
--- a/conf/test.config
+++ b/conf/test.config
@@ -20,10 +20,45 @@ params {
max_time = '6.h'
// Input data
- // TODO nf-core: Specify the paths to your test data on nf-core/test-datasets
- // TODO nf-core: Give any required params for the test so that command line flags are not needed
- input = 'https://raw.githubusercontent.com/nf-core/test-datasets/viralrecon/samplesheet/samplesheet_test_illumina_amplicon.csv'
+ input = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/samplesheet.csv'
+ databases = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/database_v1.1.csv'
+ perform_shortread_qc = true
+ perform_longread_qc = true
+ shortread_qc_mergepairs = true
+ perform_shortread_complexityfilter = true
+ perform_shortread_hostremoval = true
+ perform_longread_hostremoval = true
+ perform_runmerging = true
+ hostremoval_reference = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/homo_sapiens/genome/genome.fasta'
+ run_kaiju = true
+ run_kraken2 = true
+ run_bracken = true
+ run_malt = false
+ run_metaphlan = true
+ run_centrifuge = true
+ run_diamond = true
+ run_krakenuniq = true
+ run_motus = false
+ run_ganon = true
+ run_krona = true
+ run_kmcp = true
+ kmcp_mode = 0
+ krona_taxonomy_directory = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/sarscov2/metagenome/krona_taxonomy.tab'
+ malt_save_reads = true
+ kraken2_save_reads = true
+ centrifuge_save_reads = true
+ run_profile_standardisation = true
+}
- // Genome references
- genome = 'R64-1-1'
+process {
+ withName: MALT_RUN {
+ maxForks = 1
+ ext.args = { "-m ${params.malt_mode} -J-Xmx12G" }
+ }
+ withName: MEGAN_RMA2INFO_TSV {
+ maxForks = 1
+ }
+ withName: MEGAN_RMA2INFO_KRONA {
+ maxForks = 1
+ }
}
diff --git a/conf/test_adapterremoval.config b/conf/test_adapterremoval.config
new file mode 100644
index 00000000..c3422d02
--- /dev/null
+++ b/conf/test_adapterremoval.config
@@ -0,0 +1,52 @@
+/*
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Nextflow config file for running minimal tests
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Defines input files and everything required to run a fast and simple pipeline test.
+
+ Use as follows:
+        nextflow run nf-core/taxprofiler -profile test_adapterremoval,<docker/singularity> --outdir <OUTDIR>
+
+----------------------------------------------------------------------------------------
+*/
+
+params {
+ config_profile_name = 'Test profile for adapterremoval'
+ config_profile_description = "Minimal test to check the alternative short-read QC function, adapterremoval"
+
+ // Limit resources so that this can run on GitHub Actions
+ max_cpus = 2
+ max_memory = '6.GB'
+ max_time = '6.h'
+
+ // Input data
+ input = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/samplesheet.csv'
+ databases = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/database_v1.1.csv'
+ perform_shortread_qc = true
+ perform_longread_qc = true
+ shortread_qc_tool = 'adapterremoval'
+ perform_shortread_complexityfilter = true
+ perform_shortread_hostremoval = true
+ perform_longread_hostremoval = true
+ perform_runmerging = true
+ hostremoval_reference = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/homo_sapiens/genome/genome.fasta'
+ run_kaiju = true
+ run_kraken2 = true
+ run_bracken = false
+ run_malt = false
+ run_metaphlan = false
+ run_centrifuge = false
+ run_diamond = false
+ run_krakenuniq = false
+ run_motus = false
+ run_ganon = false
+ run_kmcp = false
+ kmcp_mode = 0
+}
+
+process {
+ withName: MALT_RUN {
+ maxForks = 1
+ ext.args = { "-m ${params.malt_mode} -J-Xmx12G" }
+ }
+}
diff --git a/conf/test_bbduk.config b/conf/test_bbduk.config
new file mode 100644
index 00000000..623fe191
--- /dev/null
+++ b/conf/test_bbduk.config
@@ -0,0 +1,52 @@
+/*
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Nextflow config file for running minimal tests
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Defines input files and everything required to run a fast and simple pipeline test.
+
+ Use as follows:
+        nextflow run nf-core/taxprofiler -profile test_bbduk,<docker/singularity> --outdir <OUTDIR>
+
+----------------------------------------------------------------------------------------
+*/
+
+params {
+ config_profile_name = 'Test profile for bbduk'
+ config_profile_description = "Minimal test to check the default tool of short-read complexity filtering, bbduk"
+
+ // Limit resources so that this can run on GitHub Actions
+ max_cpus = 2
+ max_memory = '6.GB'
+ max_time = '6.h'
+
+ // Input data
+ input = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/samplesheet.csv'
+ databases = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/database_v1.1.csv'
+ perform_shortread_qc = true
+ perform_longread_qc = true
+ perform_shortread_complexityfilter = true
+ shortread_complexityfilter_tool = 'bbduk'
+ perform_shortread_hostremoval = true
+ perform_longread_hostremoval = true
+ perform_runmerging = true
+ hostremoval_reference = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/homo_sapiens/genome/genome.fasta'
+ run_kaiju = true
+ run_kraken2 = true
+ run_bracken = false
+ run_malt = false
+ run_metaphlan = false
+ run_centrifuge = false
+ run_diamond = false
+ run_krakenuniq = false
+ run_motus = false
+ run_ganon = false
+ run_kmcp = false
+ kmcp_mode = 0
+}
+
+process {
+ withName: MALT_RUN {
+ maxForks = 1
+ ext.args = { "-m ${params.malt_mode} -J-Xmx12G" }
+ }
+}
diff --git a/conf/test_falco.config b/conf/test_falco.config
new file mode 100644
index 00000000..3fb77c03
--- /dev/null
+++ b/conf/test_falco.config
@@ -0,0 +1,52 @@
+/*
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Nextflow config file for running minimal tests
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Defines input files and everything required to run a fast and simple pipeline test.
+
+ Use as follows:
+        nextflow run nf-core/taxprofiler -profile test_falco,<docker/singularity> --outdir <OUTDIR>
+
+----------------------------------------------------------------------------------------
+*/
+
+params {
+ config_profile_name = 'Test profile for Falco'
+ config_profile_description = "Minimal test dataset without performing any preprocessing nor profiling to check pipeline function but running falco instead of fastqc. Useful when you only wish to test a single profiler without having to 'opt-out' of all the others"
+
+ // Limit resources so that this can run on GitHub Actions
+ max_cpus = 2
+ max_memory = '6.GB'
+ max_time = '6.h'
+
+ // Input data
+ input = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/samplesheet.csv'
+ databases = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/database_v1.1.csv'
+ preprocessing_qc_tool = 'falco'
+ perform_shortread_qc = true
+ perform_longread_qc = true
+ perform_shortread_complexityfilter = false
+ perform_shortread_hostremoval = false
+ perform_longread_hostremoval = false
+ perform_runmerging = false
+ hostremoval_reference = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/homo_sapiens/genome/genome.fasta'
+ run_kaiju = false
+ run_kraken2 = false
+ run_bracken = false
+ run_malt = false
+ run_metaphlan = false
+ run_centrifuge = false
+ run_diamond = false
+ run_krakenuniq = false
+ run_motus = false
+ run_ganon = false
+ run_kmcp = false
+ kmcp_mode = 0
+}
+
+process {
+ withName: MALT_RUN {
+ maxForks = 1
+ ext.args = { "-m ${params.malt_mode} -J-Xmx12G" }
+ }
+}
diff --git a/conf/test_fastp.config b/conf/test_fastp.config
new file mode 100644
index 00000000..3feeae7a
--- /dev/null
+++ b/conf/test_fastp.config
@@ -0,0 +1,53 @@
+/*
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Nextflow config file for running minimal tests
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Defines input files and everything required to run a fast and simple pipeline test.
+
+ Use as follows:
+        nextflow run nf-core/taxprofiler -profile test_fastp,<docker/singularity> --outdir <OUTDIR>
+
+----------------------------------------------------------------------------------------
+*/
+
+params {
+ config_profile_name = 'Test profile for fastp'
+ config_profile_description = "Minimal test to check the default short-read QC function, fastp"
+
+ // Limit resources so that this can run on GitHub Actions
+ max_cpus = 2
+ max_memory = '6.GB'
+ max_time = '6.h'
+
+ // Input data
+ input = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/samplesheet.csv'
+ databases = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/database_v1.1.csv'
+ perform_shortread_qc = true
+ perform_longread_qc = true
+ shortread_qc_tool = 'fastp'
+ perform_shortread_complexityfilter = true
+ shortread_complexityfilter_tool = 'fastp'
+ perform_shortread_hostremoval = true
+ perform_longread_hostremoval = true
+ perform_runmerging = true
+ hostremoval_reference = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/homo_sapiens/genome/genome.fasta'
+ run_kaiju = true
+ run_kraken2 = true
+ run_bracken = false
+ run_malt = false
+ run_metaphlan = false
+ run_centrifuge = false
+ run_diamond = false
+ run_krakenuniq = false
+ run_motus = false
+ run_ganon = false
+ run_kmcp = false
+ kmcp_mode = 0
+}
+
+process {
+ withName: MALT_RUN {
+ maxForks = 1
+ ext.args = { "-m ${params.malt_mode} -J-Xmx12G" }
+ }
+}
diff --git a/conf/test_full.config b/conf/test_full.config
index 49a10a0f..2a74a80b 100644
--- a/conf/test_full.config
+++ b/conf/test_full.config
@@ -1,12 +1,10 @@
/*
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Nextflow config file for running full-size tests
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Defines input files and everything required to run a full size pipeline test.
-
Use as follows:
nextflow run nf-core/taxprofiler -profile test_full,<docker/singularity> --outdir <OUTDIR>
-
----------------------------------------------------------------------------------------
*/
@@ -15,10 +13,66 @@ params {
config_profile_description = 'Full test dataset to check pipeline function'
// Input data for full size test
- // TODO nf-core: Specify the paths to your full test data ( on nf-core/test-datasets or directly in repositories, e.g. SRA)
- // TODO nf-core: Give any required params for the test so that command line flags are not needed
- input = 'https://raw.githubusercontent.com/nf-core/test-datasets/viralrecon/samplesheet/samplesheet_full_illumina_amplicon.csv'
+ input = 'https://github.com/nf-core/test-datasets/raw/taxprofiler/samplesheet_full.csv'
+ databases = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/database_full_v1.1.csv'
// Genome references
- genome = 'R64-1-1'
+ hostremoval_reference = 'ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCA/000/819/615/GCA_000819615.1_ViralProj14015/GCA_000819615.1_ViralProj14015_genomic.fna.gz'
+
+ save_preprocessed_reads = false
+
+ perform_shortread_qc = true
+ shortread_qc_mergepairs = true
+ perform_shortread_complexityfilter = false
+ save_complexityfiltered_reads = false
+
+ perform_longread_qc = true
+ perform_shortread_hostremoval = true
+ perform_longread_hostremoval = true
+ save_hostremoval_index = false
+ save_hostremoval_bam = false
+ save_hostremoval_unmapped = false
+
+ perform_runmerging = true
+ save_runmerged_reads = false
+
+ save_analysis_ready_fastqs = true
+
+ run_centrifuge = true
+ centrifuge_save_reads = false
+
+ run_diamond = true
+
+ run_kaiju = true
+
+ run_kraken2 = true
+ kraken2_save_reads = false
+ kraken2_save_readclassifications = false
+ kraken2_save_minimizers = false
+
+ run_krakenuniq = true
+ krakenuniq_save_reads = false
+ krakenuniq_save_readclassifications = false
+
+ run_bracken = true
+
+ run_malt = true
+ malt_save_reads = false
+ malt_generate_megansummary = true
+
+ run_metaphlan = true
+
+ run_motus = true
+ motus_save_mgc_read_counts = true
+
+ run_ganon = true
+ ganon_save_readclassifications = true
+
+ run_kmcp = true
+ kmcp_save_search = true
+
+ run_profile_standardisation = true
+ run_krona = true
}
+
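+// With cleanup enabled, Nextflow deletes intermediate files from the work directory once the run completes successfully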
+cleanup = true
diff --git a/conf/test_krakenuniq.config b/conf/test_krakenuniq.config
new file mode 100644
index 00000000..e93de158
--- /dev/null
+++ b/conf/test_krakenuniq.config
@@ -0,0 +1,68 @@
+/*
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Nextflow config file for running minimal tests
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Defines input files and everything required to run a fast and simple pipeline test.
+
+ Use as follows:
+        nextflow run nf-core/taxprofiler -profile test_krakenuniq,<docker/singularity> --outdir <OUTDIR>
+
+----------------------------------------------------------------------------------------
+*/
+
+//
+// Separate test as KrakenUniq database can sometimes be too big for GHA
+//
+
+params {
+ config_profile_name = 'Test profile'
+ config_profile_description = 'Minimal test to check KrakenUniq function'
+
+ // Limit resources so that this can run on GitHub Actions
+ max_cpus = 2
+ max_memory = '6.GB'
+ max_time = '6.h'
+
+ // Input data
+ input = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/samplesheet.csv'
+ databases = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/database_krakenuniq.csv'
+ perform_shortread_qc = true
+ perform_longread_qc = true
+ shortread_qc_mergepairs = true
+ perform_shortread_complexityfilter = true
+ perform_shortread_hostremoval = true
+ perform_longread_hostremoval = true
+ perform_runmerging = true
+ hostremoval_reference = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/homo_sapiens/genome/genome.fasta'
+ run_kaiju = false
+ run_kraken2 = false
+ run_bracken = false
+ run_malt = false
+ run_metaphlan = false
+ run_centrifuge = false
+ run_diamond = false
+ run_krakenuniq = true
+ run_motus = false
+ run_kmcp = false
+ kmcp_mode = 0
+ run_ganon = false
+ run_krona = true
+ krona_taxonomy_directory = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/sarscov2/metagenome/krona_taxonomy.tab'
+ malt_save_reads = false
+ kraken2_save_reads = false
+ centrifuge_save_reads = false
+ diamond_save_reads = false
+ run_profile_standardisation = true
+}
+
+process {
+ withName: MALT_RUN {
+ maxForks = 1
+ }
+ withName: MEGAN_RMA2INFO_TSV {
+ maxForks = 1
+ }
+ withName: MEGAN_RMA2INFO_KRONA {
+ maxForks = 1
+ }
+}
diff --git a/conf/test_malt.config b/conf/test_malt.config
new file mode 100644
index 00000000..7e5f2df3
--- /dev/null
+++ b/conf/test_malt.config
@@ -0,0 +1,54 @@
+/*
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Nextflow config file for running minimal tests
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Defines input files and everything required to run a fast and simple pipeline test.
+
+ Use as follows:
+        nextflow run nf-core/taxprofiler -profile test_malt,<docker/singularity> --outdir <OUTDIR>
+
+----------------------------------------------------------------------------------------
+*/
+
+//
+// Separate test for malt
+//
+
+params {
+ config_profile_name = 'Test profile'
+ config_profile_description = 'Minimal test to check malt function'
+
+ // Limit resources so that this can run on GitHub Actions
+ max_cpus = 2
+ max_memory = '6.GB'
+ max_time = '6.h'
+
+ // Input data
+ input = 'https://github.com/nf-core/test-datasets/raw/taxprofiler/samplesheet_malt.csv'
+ databases = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/database_v1.1.csv'
+ perform_shortread_qc = false
+ perform_longread_qc = false
+ perform_shortread_complexityfilter = false
+ perform_shortread_hostremoval = false
+ perform_longread_hostremoval = false
+ perform_runmerging = false
+ hostremoval_reference = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/homo_sapiens/genome/genome.fasta'
+ run_kaiju = false
+ run_kraken2 = false
+ run_bracken = false
+ run_malt = true
+ run_metaphlan = false
+ run_centrifuge = false
+ run_diamond = false
+ run_krakenuniq = false
+ run_motus = false
+ run_ganon = false
+ run_kmcp = false
+ kmcp_mode = 0
+}
+
+process {
+ withName: MALT_RUN {
+ maxForks = 1
+ }
+}
diff --git a/conf/test_motus.config b/conf/test_motus.config
new file mode 100644
index 00000000..ef1a2276
--- /dev/null
+++ b/conf/test_motus.config
@@ -0,0 +1,52 @@
+/*
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Nextflow config file for running minimal tests
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Defines input files and everything required to run a fast and simple pipeline test.
+
+ Use as follows:
+        nextflow run nf-core/taxprofiler -profile test_motus,<docker/singularity> --outdir <OUTDIR>
+
+----------------------------------------------------------------------------------------
+*/
+
+//
+// Separate test as mOTUs database download can be flaky
+//
+
+params {
+ config_profile_name = 'mOTUs Test profile'
+ config_profile_description = 'Minimal test to check mOTUs function'
+
+ // Limit resources so that this can run on GitHub Actions
+ max_cpus = 2
+ max_memory = '6.GB'
+ max_time = '6.h'
+
+ // Input data
+ input = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/samplesheet.csv'
+ databases = 'database_motus.csv'
+ perform_shortread_qc = false
+ perform_longread_qc = false
+ perform_shortread_complexityfilter = false
+ perform_shortread_hostremoval = false
+ perform_longread_hostremoval = false
+ perform_runmerging = false
+ hostremoval_reference = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/homo_sapiens/genome/genome.fasta'
+ run_kaiju = false
+ run_kraken2 = false
+ run_bracken = false
+ run_malt = false
+ run_metaphlan = false
+ run_centrifuge = false
+ run_diamond = false
+ run_krakenuniq = false
+ run_motus = true
+ run_kmcp = false
+ kmcp_mode = 0
+ run_ganon = false
+ motus_save_mgc_read_counts = false
+ motus_remove_ncbi_ids = false
+ motus_use_relative_abundance = false
+ run_profile_standardisation = true
+}
diff --git a/conf/test_nopreprocessing.config b/conf/test_nopreprocessing.config
new file mode 100644
index 00000000..004a49e8
--- /dev/null
+++ b/conf/test_nopreprocessing.config
@@ -0,0 +1,52 @@
+/*
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Nextflow config file for running minimal tests
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Defines input files and everything required to run a fast and simple pipeline test.
+
+ Use as follows:
+        nextflow run nf-core/taxprofiler -profile test_nopreprocessing,<docker/singularity> --outdir <OUTDIR>
+
+----------------------------------------------------------------------------------------
+*/
+
+params {
+ config_profile_name = 'Test profile'
+ config_profile_description = 'Minimal test dataset skipping all preprocessing to check pipeline function'
+
+ // Limit resources so that this can run on GitHub Actions
+ max_cpus = 2
+ max_memory = '6.GB'
+ max_time = '6.h'
+
+ // Input data
+ input = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/samplesheet.csv'
+ databases = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/database_v1.1.csv'
+ perform_shortread_qc = false
+ perform_longread_qc = false
+ perform_shortread_complexityfilter = false
+ perform_shortread_hostremoval = false
+ perform_longread_hostremoval = false
+ perform_runmerging = false
+ hostremoval_reference = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/homo_sapiens/genome/genome.fasta'
+ run_kaiju = true
+ run_kraken2 = true
+ run_bracken = true
+ run_malt = false // too big with other profiles on GHA
+ run_metaphlan = true
+ run_centrifuge = true
+ run_diamond = true
+ run_krakenuniq = true
+ run_motus = false
+ run_kmcp = true
+ kmcp_mode = 0
+ run_ganon = true
+ run_krona = true
+}
+
+process {
+ withName: MALT_RUN {
+ maxForks = 1
+ ext.args = { "-m ${params.malt_mode} -J-Xmx12G" }
+ }
+}
diff --git a/conf/test_noprofiling.config b/conf/test_noprofiling.config
new file mode 100644
index 00000000..7cf2317d
--- /dev/null
+++ b/conf/test_noprofiling.config
@@ -0,0 +1,51 @@
+/*
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Nextflow config file for running minimal tests
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Defines input files and everything required to run a fast and simple pipeline test.
+
+ Use as follows:
+        nextflow run nf-core/taxprofiler -profile test_noprofiling,<docker/singularity> --outdir <OUTDIR>
+
+----------------------------------------------------------------------------------------
+*/
+
+params {
+ config_profile_name = 'Test profile'
+ config_profile_description = 'Minimal test dataset without performing any profiling to check pipeline function'
+
+ // Limit resources so that this can run on GitHub Actions
+ max_cpus = 2
+ max_memory = '6.GB'
+ max_time = '6.h'
+
+ // Input data
+ input = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/samplesheet.csv'
+ databases = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/database_v1.1.csv'
+ perform_shortread_qc = true
+ perform_longread_qc = true
+ shortread_qc_mergepairs = true
+ perform_shortread_complexityfilter = true
+ perform_shortread_hostremoval = true
+ perform_longread_hostremoval = true
+ perform_runmerging = true
+ hostremoval_reference = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/homo_sapiens/genome/genome.fasta'
+ run_kaiju = false
+ run_kraken2 = false
+ run_bracken = false
+ run_malt = false
+ run_metaphlan = false
+ run_centrifuge = false
+ run_diamond = false
+ run_krakenuniq = false
+ run_motus = false
+ run_kmcp = false
+ kmcp_mode = 0
+ run_ganon = false
+}
+
+process {
+ withName: MALT_RUN {
+ maxForks = 1
+ }
+}
diff --git a/conf/test_nothing.config b/conf/test_nothing.config
new file mode 100644
index 00000000..ed247ef4
--- /dev/null
+++ b/conf/test_nothing.config
@@ -0,0 +1,51 @@
+/*
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Nextflow config file for running minimal tests
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Defines input files and everything required to run a fast and simple pipeline test.
+
+ Use as follows:
+        nextflow run nf-core/taxprofiler -profile test_nothing,<docker/singularity> --outdir <OUTDIR>
+
+----------------------------------------------------------------------------------------
+*/
+
+params {
+ config_profile_name = 'Test profile'
+ config_profile_description = "Minimal test dataset without performing any preprocessing nor profiling to check pipeline function. Useful when you only wish to test a single profiler without having to 'opt-out' of all the others"
+
+ // Limit resources so that this can run on GitHub Actions
+ max_cpus = 2
+ max_memory = '6.GB'
+ max_time = '6.h'
+
+ // Input data
+ input = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/samplesheet.csv'
+ databases = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/database_v1.1.csv'
+ perform_shortread_qc = false
+ perform_longread_qc = false
+ perform_shortread_complexityfilter = false
+ perform_shortread_hostremoval = false
+ perform_longread_hostremoval = false
+ perform_runmerging = false
+ hostremoval_reference = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/homo_sapiens/genome/genome.fasta'
+ run_kaiju = false
+ run_kraken2 = false
+ run_bracken = false
+ run_malt = false
+ run_metaphlan = false
+ run_centrifuge = false
+ run_diamond = false
+ run_krakenuniq = false
+ run_motus = false
+ run_kmcp = false
+ kmcp_mode = 0
+ run_ganon = false
+}
+
+process {
+ withName: MALT_RUN {
+ maxForks = 1
+ ext.args = { "-m ${params.malt_mode} -J-Xmx12G" }
+ }
+}
diff --git a/conf/test_prinseqplusplus.config b/conf/test_prinseqplusplus.config
new file mode 100644
index 00000000..acc23aa8
--- /dev/null
+++ b/conf/test_prinseqplusplus.config
@@ -0,0 +1,52 @@
+/*
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Nextflow config file for running minimal tests
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Defines input files and everything required to run a fast and simple pipeline test.
+
+ Use as follows:
+        nextflow run nf-core/taxprofiler -profile test_prinseqplusplus,<docker/singularity> --outdir <OUTDIR>
+
+----------------------------------------------------------------------------------------
+*/
+
+params {
+ config_profile_name = 'Test profile for prinseqplusplus'
+ config_profile_description = "Minimal test to check the alternative tool of short-read complexity filtering, prinseqplusplus"
+
+ // Limit resources so that this can run on GitHub Actions
+ max_cpus = 2
+ max_memory = '6.GB'
+ max_time = '6.h'
+
+ // Input data
+ input = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/samplesheet.csv'
+ databases = 'https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/database_v1.1.csv'
+ perform_shortread_qc = true
+ perform_longread_qc = true
+ perform_shortread_complexityfilter = true
+ shortread_complexityfilter_tool = 'prinseqplusplus'
+ perform_shortread_hostremoval = false
+ perform_longread_hostremoval = false
+ perform_runmerging = false
+ hostremoval_reference = 'https://raw.githubusercontent.com/nf-core/test-datasets/modules/data/genomics/homo_sapiens/genome/genome.fasta'
+ run_kaiju = true
+ run_kraken2 = true
+ run_bracken = false
+ run_malt = false
+ run_metaphlan = false
+ run_centrifuge = false
+ run_diamond = false
+ run_krakenuniq = false
+ run_motus = false
+ run_ganon = false
+ run_kmcp = false
+ kmcp_mode = 0
+}
+
+process {
+ withName: MALT_RUN {
+ maxForks = 1
+ ext.args = { "-m ${params.malt_mode} -J-Xmx12G" }
+ }
+}
diff --git a/docs/images/nf-core-taxprofiler_icon.png b/docs/images/nf-core-taxprofiler_icon.png
new file mode 100644
index 00000000..c639fb67
Binary files /dev/null and b/docs/images/nf-core-taxprofiler_icon.png differ
diff --git a/docs/images/nf-core-taxprofiler_icon.svg b/docs/images/nf-core-taxprofiler_icon.svg
new file mode 100644
index 00000000..24e615ff
--- /dev/null
+++ b/docs/images/nf-core-taxprofiler_icon.svg
@@ -0,0 +1,444 @@
diff --git a/docs/images/nf-core-taxprofiler_icon_border.svg b/docs/images/nf-core-taxprofiler_icon_border.svg
new file mode 100644
index 00000000..887e8e82
--- /dev/null
+++ b/docs/images/nf-core-taxprofiler_icon_border.svg
@@ -0,0 +1,445 @@
diff --git a/docs/images/nf-core-taxprofiler_logo_custom_dark.png b/docs/images/nf-core-taxprofiler_logo_custom_dark.png
new file mode 100644
index 00000000..6b089fc1
Binary files /dev/null and b/docs/images/nf-core-taxprofiler_logo_custom_dark.png differ
diff --git a/docs/images/nf-core-taxprofiler_logo_custom_dark.svg b/docs/images/nf-core-taxprofiler_logo_custom_dark.svg
new file mode 100644
index 00000000..3d47b4c6
--- /dev/null
+++ b/docs/images/nf-core-taxprofiler_logo_custom_dark.svg
@@ -0,0 +1,2302 @@
diff --git a/docs/images/nf-core-taxprofiler_logo_custom_light.png b/docs/images/nf-core-taxprofiler_logo_custom_light.png
new file mode 100644
index 00000000..2dc85b81
Binary files /dev/null and b/docs/images/nf-core-taxprofiler_logo_custom_light.png differ
diff --git a/docs/images/nf-core-taxprofiler_logo_custom_light.svg b/docs/images/nf-core-taxprofiler_logo_custom_light.svg
new file mode 100644
index 00000000..dae1fbe0
--- /dev/null
+++ b/docs/images/nf-core-taxprofiler_logo_custom_light.svg
@@ -0,0 +1,2305 @@
diff --git a/docs/images/nf_core_taxprofiler_icon_border.png b/docs/images/nf_core_taxprofiler_icon_border.png
new file mode 100644
index 00000000..c513de0c
Binary files /dev/null and b/docs/images/nf_core_taxprofiler_icon_border.png differ
diff --git a/docs/images/taxprofiler_logo.svg b/docs/images/taxprofiler_logo.svg
new file mode 100644
index 00000000..c9aefbd2
--- /dev/null
+++ b/docs/images/taxprofiler_logo.svg
@@ -0,0 +1,3223 @@
diff --git a/docs/images/taxprofiler_tube.pdf b/docs/images/taxprofiler_tube.pdf
new file mode 100644
index 00000000..024d4aca
Binary files /dev/null and b/docs/images/taxprofiler_tube.pdf differ
diff --git a/docs/images/taxprofiler_tube.png b/docs/images/taxprofiler_tube.png
new file mode 100644
index 00000000..b9119af4
Binary files /dev/null and b/docs/images/taxprofiler_tube.png differ
diff --git a/docs/images/taxprofiler_tube.svg b/docs/images/taxprofiler_tube.svg
new file mode 100644
index 00000000..b7d52428
--- /dev/null
+++ b/docs/images/taxprofiler_tube.svg
@@ -0,0 +1,5266 @@
diff --git a/docs/output.md b/docs/output.md
index 0287a907..cf4678c3 100644
--- a/docs/output.md
+++ b/docs/output.md
@@ -6,29 +6,61 @@ This document describes the output produced by the pipeline. Most of the plots a
The directories listed below will be created in the results directory after the pipeline has finished. All paths are relative to the top-level results directory.
-
-
## Pipeline overview
The pipeline is built using [Nextflow](https://www.nextflow.io/) and processes data using the following steps:
- [FastQC](#fastqc) - Raw read QC
+- [falco](#fastqc) - Alternative to FastQC for raw read QC
+- [fastp](#fastp) - Adapter trimming for Illumina data
+- [AdapterRemoval](#adapterremoval) - Adapter trimming for Illumina data
+- [Porechop](#porechop) - Adapter removal for Oxford Nanopore data
+- [BBDuk](#bbduk) - Quality trimming and filtering for Illumina data
+- [PRINSEQ++](#prinseq) - Quality trimming and filtering for Illumina data
+- [Filtlong](#filtlong) - Quality trimming and filtering for Nanopore data
+- [Bowtie2](#bowtie2) - Host removal for Illumina reads
+- [minimap2](#minimap2) - Host removal for Nanopore reads
+- [SAMtools stats](#samtools-stats) - Statistics from host removal
+- [SAMtools fastq](#samtools-fastq) - Converts unmapped BAM file to fastq format (minimap2 only)
+- [Analysis Ready Reads](#analysis-ready-reads) - Optional results directory containing the final processed reads used as input for classification/profiling.
+- [Bracken](#bracken) - Taxonomic classifier using k-mers and abundance estimations
+- [Kraken2](#kraken2) - Taxonomic classifier using exact k-mer matches
+- [KrakenUniq](#krakenuniq) - Taxonomic classifier that combines the k-mer-based classification and the number of unique k-mers found in each species
+- [Centrifuge](#centrifuge) - Taxonomic classifier that uses a novel indexing scheme based on the Burrows-Wheeler transform (BWT) and the Ferragina-Manzini (FM) index.
+- [Kaiju](#kaiju) - Taxonomic classifier that finds maximum (in-)exact matches on the protein-level.
+- [Diamond](#diamond) - Sequence aligner for protein and translated DNA searches.
+- [MALT](#malt) - Sequence alignment and analysis tool designed for processing high-throughput sequencing data, especially in the context of metagenomics
+- [MetaPhlAn](#metaphlan) - Genome-level marker gene based taxonomic classifier
+- [mOTUs](#motus) - Tool for marker gene-based OTU (mOTU) profiling.
+- [KMCP](#kmcp) - Taxonomic classifier that utilizes genome coverage information by splitting the reference genomes into chunks and storing k-mers in a modified and optimized COBS index for fast alignment-free sequence searching.
+- [ganon](#ganon) - Taxonomic classifier and profiler that uses Interleaved Bloom Filters as indices based on k-mers/minimizers.
+- [TAXPASTA](#taxpasta) - Tool to standardise taxonomic profiles as well as merge profiles across samples from the same database and classifier/profiler.
- [MultiQC](#multiqc) - Aggregate report describing results and QC from the whole pipeline
- [Pipeline information](#pipeline-information) - Report metrics generated during the workflow execution
-### FastQC
+![Overview diagram of the steps in the nf-core/taxprofiler pipeline](images/taxprofiler_tube.png)
+
+### FastQC or Falco
Output files
-- `fastqc/`
- - `*_fastqc.html`: FastQC report containing quality metrics.
- - `*_fastqc.zip`: Zip archive containing the FastQC report, tab-delimited data file and plot images.
+- `{fastqc,falco}/`
+  - `{raw,preprocessed}/`
+    - `*.html`: FastQC or Falco report containing quality metrics in HTML format.
+    - `*.txt`: FastQC or Falco report containing quality metrics in TXT format.
+ - `*.zip`: Zip archive containing the FastQC report, tab-delimited data file and plot images (FastQC only).
[FastQC](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/) gives general quality metrics about your sequenced reads. It provides information about the quality score distribution across your reads, per base sequence content (%A/T/G/C), adapter contamination and overrepresented sequences. For further reading and documentation see the [FastQC help pages](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/).
+If preprocessing is turned on, nf-core/taxprofiler runs FastQC/Falco twice, once before and once after adapter removal/read merging, to allow evaluation of the performance of these preprocessing steps. Note that in the General Stats table of the MultiQC report, the columns of these two instances of FastQC/Falco are placed next to each other to make comparison easier. However, the columns of the actual preprocessing steps (i.e. fastp, AdapterRemoval, and Porechop) are displayed _after_ the two FastQC/Falco columns, even though these tools run 'between' the two FastQC/Falco jobs in the pipeline itself.
+
+:::info
+Falco produces identical output to FastQC but in the `falco/` directory.
+:::
+
![MultiQC - FastQC sequence counts plot](images/mqc_fastqc_counts.png)
![MultiQC - FastQC mean quality scores plot](images/mqc_fastqc_quality.png)
@@ -39,6 +71,542 @@ The pipeline is built using [Nextflow](https://www.nextflow.io/) and processes d
The FastQC plots displayed in the MultiQC report shows _untrimmed_ reads. They may contain adapter sequence and potentially regions with low quality.
:::
+### fastp
+
+[fastp](https://github.com/OpenGene/fastp) is a FASTQ pre-processing tool for quality control, trimming of adapters, quality filtering and other features.
+
+It is used in nf-core/taxprofiler for adapter trimming of short-reads.
+
+
+Output files
+
+- `fastp/`
+ - `.fastp.fastq.gz`: File with the trimmed unmerged fastq reads.
+ - `.merged.fastq.gz`: File with the reads that were successfully merged.
+  - `*.{log,html,json}`: Log files in different formats.
+
+
+
+By default, nf-core/taxprofiler will only provide the `.fastp.fastq.gz` file if fastp is selected. The file `.merged.fastq.gz` will be available in the output folder if you provide the argument `--shortread_qc_mergepairs` (optionally retaining un-merged pairs when in combination with `--shortread_qc_includeunmerged`).
+
+You can change the default value for low complexity filtering by using the argument `--shortread_complexityfilter_fastp_threshold`.
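+
+As a sketch (samplesheet, database sheet, and output paths are placeholders), a short-read run that uses fastp with pair-merging enabled and unmerged pairs retained could look like:
+
+```bash
+nextflow run nf-core/taxprofiler -profile docker \
+  --input samplesheet.csv --databases databases.csv --outdir ./results \
+  --perform_shortread_qc \
+  --shortread_qc_mergepairs --shortread_qc_includeunmerged
+```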
+
+### AdapterRemoval
+
+[AdapterRemoval](https://adapterremoval.readthedocs.io/en/stable/) searches for and removes remnant adapter sequences from High-Throughput Sequencing (HTS) data and (optionally) trims low quality bases from the 3' end of reads following adapter removal. It is popular in the field of palaeogenomics. The output logs are stored in the results folder, and as a part of the MultiQC report.
+
+
+Output files
+
+- `adapterremoval/`
+ - `.settings`: AdapterRemoval log file containing general adapter removal, read trimming and merging statistics
+  - `.collapsed.fastq.gz` - read-pairs that were merged and did not undergo trimming (only when `--shortread_qc_mergepairs` supplied)
+  - `.collapsed.truncated.fastq.gz` - read-pairs that were merged and underwent quality trimming (only when `--shortread_qc_mergepairs` supplied)
+ - `.pair1.truncated.fastq.gz` - read 1 of pairs that underwent quality trimming
+ - `.pair2.truncated.fastq.gz` - read 2 of pairs that underwent quality trimming (and could not merge if `--shortread_qc_mergepairs` supplied)
+  - `.singleton.truncated.fastq.gz` - orphaned reads where the other read of the pair was discarded
+ - `.discard.fastq.gz` - reads that were discarded due to length or quality filtering
+
+
+
+By default, nf-core/taxprofiler will only provide the `.settings` file if AdapterRemoval is selected.
+
+You will only find the `.fastq` files in the results directory if you provide `--save_preprocessed_reads`. If this is selected, you may receive different combinations of `.fastq` files for each sample depending on the input types - e.g. whether you have merged or not, or if you're supplying both single- and paired-end reads. Alternatively, if you wish only to have the 'final' reads that go into classification/profiling (i.e., that may have additional processing), do not specify this flag but rather specify `--save_analysis_ready_reads`, in which case the reads will be in the folder `analysis_ready_reads`.
+
+:::warning
+The resulting `.fastq` files may _not_ always be the 'final' reads that go into taxprofiling, if you also run other steps such as complexity filtering, host removal, run merging, etc.
+:::
+
+### Porechop
+
+[Porechop](https://github.com/rrwick/Porechop) is a tool for finding and removing adapters from Oxford Nanopore reads. Adapters on the ends of reads are trimmed, and if a read has an adapter in its middle, it is considered chimeric and is chopped into separate reads.
+
+
+Output files
+
+- `porechop/`
+ - `.log`: Log file containing trimming statistics
+ - `.fastq.gz`: Adapter-trimmed file
+
+
+
+The output logs are saved in the output folder and are part of the MultiQC report. You do not normally need to check these manually.
+
+You will only find the `.fastq` files in the results directory if you provide `--save_preprocessed_reads`. Alternatively, if you wish only to have the 'final' reads that go into classification/profiling (i.e., that may have additional processing), do not specify this flag but rather specify `--save_analysis_ready_reads`, in which case the reads will be in the folder `analysis_ready_reads`.
+
+:::warning
+We do **not** recommend using Porechop if you are already trimming the adapters with ONT's basecaller Guppy.
+:::
+
+### BBDuk
+
+[BBDuk](https://jgi.doe.gov/data-and-tools/software-tools/bbtools/bb-tools-user-guide/bbduk-guide/) stands for Decontamination Using Kmers. BBDuk was developed to combine most common data-quality-related trimming, filtering, and masking operations into a single high-performance tool.
+
+It is used in nf-core/taxprofiler for complexity filtering using different algorithms. This means that it will remove reads with low sequence diversity (e.g. mono- or dinucleotide repeats).
+
+
+Output files
+
+- `bbduk/`
+ - `.bbduk.log`: log file containing filtering statistics
+ - `.fastq.gz`: resulting FASTQ file without low-complexity reads
+
+
+
+By default, nf-core/taxprofiler will only provide the `.log` file if BBDuk is selected as the complexity filtering tool. You will only find the complexity filtered reads in your results directory if you provide `--save_complexityfiltered_reads`. Alternatively, if you wish only to have the 'final' reads that go into classification/profiling (i.e., that may have additional processing), do not specify this flag but rather specify `--save_analysis_ready_reads`, in which case the reads will be in the folder `analysis_ready_reads`.
+
+:::warning
+The resulting `.fastq` files may _not_ always be the 'final' reads that go into taxprofiling, if you also run other steps such as host removal, run merging, etc.
+:::
+
+### PRINSEQ++
+
+[PRINSEQ++](https://github.com/Adrian-Cantu/PRINSEQ-plus-plus) is a C++ implementation of the [prinseq-lite.pl](https://prinseq.sourceforge.net/) program. It can be used to filter, reformat or trim genomic and metagenomic sequence data.
+
+It is used in nf-core/taxprofiler for complexity filtering using different algorithms. This means that it will remove reads with low sequence diversity (e.g. mono- or dinucleotide repeats).
+
+
+Output files
+
+- `prinseqplusplus/`
+ - `.log`: log file containing number of reads. Row IDs correspond to: `min_len, max_len, min_gc, max_gc, min_qual_score, min_qual_mean, ns_max_n, noiupac, derep, lc_entropy, lc_dust, trim_tail_left, trim_tail_right, trim_qual_left, trim_qual_right, trim_left, trim_right`
+ - `_good_out.fastq.gz`: resulting FASTQ file without low-complexity reads
+
+
+
+By default, nf-core/taxprofiler will only provide the `.log` file if PRINSEQ++ is selected as the complexity filtering tool. You will only find the complexity filtered `.fastq` files in your results directory if you supply `--save_complexityfiltered_reads`. Alternatively, if you wish only to have the 'final' reads that go into classification/profiling (i.e., that may have additional processing), do not specify this flag but rather specify `--save_analysis_ready_reads`, in which case the reads will be in the folder `analysis_ready_reads`.
+
+:::warning
+The resulting `.fastq` files may _not_ always be the 'final' reads that go into taxprofiling, if you also run other steps such as host removal, run merging, etc.
+:::
+
+### Filtlong
+
+[Filtlong](https://github.com/rrwick/Filtlong) is a quality filtering tool for long reads. It can take a set of long reads and produce a smaller, better subset.
+
+
+Output files
+
+- `filtlong/`
+  - `_filtered.fastq.gz`: FASTQ file containing the reads that passed quality/length filtering
+ - `_filtered.log`: log file containing summary statistics
+
+
+
+You will only find the `.fastq` files in the results directory if you provide `--save_preprocessed_reads`. Alternatively, if you wish only to have the 'final' reads that go into classification/profiling (i.e., that may have additional processing), do not specify this flag but rather specify `--save_analysis_ready_reads`, in which case the reads will be in the folder `analysis_ready_reads`.
+
+:::warning
+We do _not_ recommend using Filtlong if you have already filtered low-quality reads with ONT's basecaller Guppy.
+:::
+
+### Bowtie2
+
+[Bowtie 2](https://bowtie-bio.sourceforge.net/bowtie2/index.shtml) is an ultrafast and memory-efficient tool for aligning sequencing reads to long reference sequences. It is particularly good at aligning reads of about 50 up to 100s or 1,000s of characters, and particularly good at aligning to relatively long (e.g. mammalian) genomes.
+
+It is used with nf-core/taxprofiler to allow removal of 'host' (e.g. human) and/or other possible contaminant reads (e.g. Phi X) from short-read `.fastq` files prior to profiling.
+
+
+Output files
+
+- `bowtie2/`
+ - `build/`
+    - `*.bt2`: Bowtie2 indices of reference genome, only if `--save_hostremoval_index` supplied.
+ - `align/`
+ - `.bam`: BAM file containing reads that aligned against the user-supplied reference genome as well as unmapped reads
+ - `.bowtie2.log`: log file about the mapped reads
+    - `.unmapped.fastq.gz`: the off-target reads from the mapping that are used in downstream steps.
+
+
+
+By default, nf-core/taxprofiler will only provide the `.log` file if host removal is turned on. You will only have a `.bam` file if you specify `--save_hostremoval_bam`. This will contain _both_ mapped and unmapped reads. You will only get FASTQ files if you specify `--save_hostremoval_unmapped` - these contain only unmapped reads. Alternatively, if you wish only to have the 'final' reads that go into classification/profiling (i.e., that may have additional processing), do not specify this flag but rather specify `--save_analysis_ready_reads`, in which case the reads will be in the folder `analysis_ready_reads`.
+
+:::info
+Unmapped reads in FASTQ format are only found in this directory for short reads; for long reads, see [`samtools/fastq/`](#samtools-fastq).
+:::
+
+:::info
+The resulting `.fastq` files may _not_ always be the 'final' reads that go into taxprofiling, if you also run other steps such as run merging, etc.
+:::
+
+:::info
+While there is a dedicated section in the MultiQC HTML for Bowtie2, these values are not displayed by default in the General Stats table. Rather, alignment statistics against the host genome are reported via the samtools stats module in the MultiQC report, for direct comparison with minimap2 (see below).
+:::
+
+### minimap2
+
+[minimap2](https://github.com/lh3/minimap2) is an alignment tool suited to mapping long reads to reference sequences.
+
+It is used with nf-core/taxprofiler to allow removal of 'host' (e.g. human) or other possible contaminant reads from long-read `.fastq` files prior to taxonomic classification/profiling.
+
+
+Output files
+
+- `minimap2/`
+ - `build/`
+    - `*.mmi`: minimap2 indices of reference genome, only if `--save_hostremoval_index` supplied.
+ - `align/`
+ - `.bam`: Alignment file in BAM format containing both mapped and unmapped reads.
+
+
+
+By default, nf-core/taxprofiler will only provide the `.bam` file containing mapped and unmapped reads if saving of host removal for long reads is turned on via `--save_hostremoval_bam`.
+
+:::info
+minimap2 is not yet supported as a module in MultiQC, and therefore there is no dedicated section in the MultiQC HTML. Rather, alignment statistics against the host genome are reported via the samtools stats module in the MultiQC report.
+:::
+
+:::info
+Unlike Bowtie2, minimap2 does not produce an unmapped FASTQ file by itself. See [`samtools/fastq`](#samtools-fastq).
+:::
+
+### SAMtools fastq
+
+[SAMtools fastq](http://www.htslib.org/doc/1.1/samtools.html) converts a `.sam`, `.bam`, or `.cram` alignment file to FASTQ format.
+
+
+Output files
+
+- `samtools/fastq/`
+ - `_interleaved.fq.gz`: Unmapped reads only in FASTQ gzip format
+
+
+
+This directory will be present and contain the unmapped reads in `.fastq` format from long-read minimap2 host removal, if `--save_hostremoval_unmapped` is supplied. Alternatively, if you wish only to have the 'final' reads that go into classification/profiling (i.e., that may have additional processing), do not specify this flag but rather specify `--save_analysis_ready_reads`, in which case the reads will be in the folder `analysis_ready_reads`.
+
+:::info
+For short-read unmapped reads, see [bowtie2](#bowtie2).
+:::
+
+### Analysis Ready Reads
+
+:::info
+This optional results directory will only be present in the pipeline results when supplying `--save_analysis_ready_reads`.
+:::
+
+
+Output files
+
+- `analysis_ready_reads/`
+  - `*.{fq,fastq}.gz`: Final reads that underwent preprocessing and were sent for classification/profiling.
+
+
+
+The results directory will contain the 'final' processed reads used as input for classification/profiling. It will _only_ include the output of the _last_ step of any combinations of preprocessing steps that may have been specified in the run configuration. For example, if you perform the read QC and host-removal preprocessing steps, the final reads that are sent to classification/profiling are the host-removed FASTQ files - those will be the ones present in this directory.
+
+:::warning
+If you turn off all preprocessing steps, then no results will be present in this directory. This happens independently for short- and long-reads, i.e. you will only have FASTQ files for short reads in this directory if you skip all long-read preprocessing.
+:::
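+
+For example, a minimal sketch (file paths are placeholders) that performs read QC and host removal but keeps only the final classifier-ready FASTQ files might be:
+
+```bash
+nextflow run nf-core/taxprofiler -profile docker \
+  --input samplesheet.csv --databases databases.csv --outdir ./results \
+  --perform_shortread_qc \
+  --perform_shortread_hostremoval --hostremoval_reference host.fasta \
+  --save_analysis_ready_reads
+```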
+
+### SAMtools stats
+
+[SAMtools stats](http://www.htslib.org/doc/samtools-stats.html) collects statistics from a `.sam`, `.bam`, or `.cram` alignment file and outputs in a text format.
+
+
+Output files
+
+- `samtools/stats/`
+ - `.stats`: File containing samtools stats output.
+
+
+
+In most cases you do not need to check this file, as it is rendered in the MultiQC run report.
+
+### Run Merging
+
+nf-core/taxprofiler offers the option to merge FASTQ files of multiple sequencing runs or libraries that derive from the same sample, as specified in the input samplesheet.
+
+This is the last possible preprocessing step, so if you have multiple runs or libraries (and run merging turned on), this will represent the final reads that will go into classification/profiling steps.
+
+
+Output files
+
+- `run_merging/`
+ - `*.fastq.gz`: Concatenated FASTQ files on a per-sample basis
+
+
+
+Note that you will only find samples that went through the run merging step in this directory. Samples that had a single run or library will not go through this step of the pipeline and thus will not be present in this directory.
+
+This directory and its FASTQ files will only be present if you supply `--save_runmerged_reads`. Alternatively, if you wish only to have the 'final' reads that go into classification/profiling (i.e., that may have additional processing), do not specify this flag but rather specify `--save_analysis_ready_reads`, in which case the reads will be in the folder `analysis_ready_reads`.
+
+### Bracken
+
+[Bracken](https://ccb.jhu.edu/software/bracken/) (Bayesian Reestimation of Abundance with Kraken) is a highly accurate statistical method that computes the abundance of species in DNA sequences from a metagenomics sample. Bracken uses the taxonomy labels assigned by Kraken, a highly accurate metagenomics classification algorithm, to estimate the number of reads originating from each species present in a sample.
+
+:::info
+The first step of using Bracken requires running Kraken2, therefore the initial results before abundance estimation will be found in `/kraken2/`.
+:::
+
+
+Output files
+
+- `bracken/`
+  - `bracken_<db_name>_combined_reports.txt`: combined bracken results as output from Bracken's `combine_bracken_outputs.py` script
+  - `<db_name>/`
+    - `<sample_id>_<db_name>.tsv`: TSV file containing per-sample summary of Bracken results with abundance information
+    - `<sample_id>_<db_name>.report_bracken_species.txt`: Kraken2 style report with Bracken abundance information
+
+
+
+The main taxonomic profiling file from Bracken is the `*.tsv` file. This provides the basic results from Kraken2 but with the corrected abundance information. Note that the raw Kraken2 version of the upstream step of Bracken can be found in the `kraken2/` directory with the suffix `_<db_name>.bracken.report.txt` (with an 8 column variant when `--save_minimizers` is specified).
+
+### Kraken2
+
+[Kraken](https://ccb.jhu.edu/software/kraken2/) is a taxonomic sequence classifier that assigns taxonomic labels to DNA sequences. Kraken examines the k-mers within a query sequence and uses the information within those k-mers to query a database. That database maps k-mers to the lowest common ancestor (LCA) of all genomes known to contain a given k-mer.
+
+
+Output files
+
+- `kraken2/`
+  - `<db_name>_combined_reports.txt`: A combined profile of all samples aligned to a given database (as generated by `krakentools`)
+    - If you have also run Bracken, the original Kraken report (i.e., _before_ read re-assignment) will also be included in this directory with `-bracken` suffixed to your Bracken database name. For example: `kraken2-<db_name>-bracken.tsv`. However in most cases you want to use the actual Bracken file (i.e., `bracken_<db_name>.tsv`).
+  - `<db_name>/`
+    - `<sample_id>_<db_name>.classified.fastq.gz`: FASTQ file containing all reads that had a hit against a reference in the database for a given sample
+    - `<sample_id>_<db_name>.unclassified.fastq.gz`: FASTQ file containing all reads that did not have a hit in the database for a given sample
+    - `<sample_id>_<db_name>.report.txt`: A Kraken2 report that summarises the fraction abundance, taxonomic ID, number of k-mers, taxonomic path of all the hits in the Kraken2 run for a given sample. Will be 8 columns rather than 6 if `--save_minimizers` is specified.
+    - `<sample_id>_<db_name>.classifiedreads.txt`: A list of read IDs and the hits each read had against each database for a given sample
+
+
+
+The main taxonomic classification file from Kraken2 is the `<db_name>_combined_reports.txt` or `*report.txt` file. The former provides the broadest overview of the taxonomic classification results across all samples against a single database, where you get two columns for each sample e.g. `2_all` and `2_lvl`, as well as summarised columns summing up across all samples, `tot_all` and `tot_lvl`. The latter gives you the most information for a single sample. The report file is also used for the taxpasta step.
+
+You will only receive the `.fastq` and `*classifiedreads.txt` files if you supply the `--kraken2_save_reads` and/or `--kraken2_save_readclassifications` parameters to the pipeline.
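+
+For example (a sketch; samplesheet, database sheet, and output paths are placeholders, and the per-tool flag follows the `--run_<tool>` pattern), a run keeping both of these per-sample outputs could look like:
+
+```bash
+nextflow run nf-core/taxprofiler -profile docker \
+  --input samplesheet.csv --databases databases.csv --outdir ./results \
+  --run_kraken2 \
+  --kraken2_save_reads --kraken2_save_readclassifications
+```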
+
+### KrakenUniq
+
+[KrakenUniq](https://github.com/fbreitwieser/krakenuniq) (formerly KrakenHLL) is an extension to the fast k-mer-based classification performed by [Kraken](https://github.com/DerrickWood/kraken) with an efficient algorithm for additionally assessing the coverage of unique k-mers found in each species in a dataset.
+
+
+Output files
+
+- `krakenuniq/`
+  - `<db_name>/`
+    - `<sample_id>_<db_name>[.merged].classified.fasta.gz`: Optional FASTA file containing all reads that had a hit against a reference in the database for a given sample. Paired-end input reads are merged in this output.
+    - `<sample_id>_<db_name>[.merged].unclassified.fasta.gz`: Optional FASTA file containing all reads that did not have a hit in the database for a given sample. Paired-end input reads are merged in this output.
+    - `<sample_id>_<db_name>.krakenuniq.report.txt`: A Kraken2-style report that summarises the fraction abundance, taxonomic ID, number of k-mers, taxonomic path of all the hits, with an additional column for k-mer coverage, that allows for more accurate distinguishing between false-positive/true-positive hits.
+    - `<sample_id>_<db_name>.krakenuniq.classified.txt`: An optional list of read IDs and the hits each read had against each database for a given sample.
+
+
+
+The main taxonomic classification file from KrakenUniq is the `*.krakenuniq.report.txt` file. This is an extension of the Kraken2 report with the additional k-mer coverage information that provides more information about the accuracy of hits.
+
+You will only receive the `.fasta.gz` and `*.krakenuniq.classified.txt` files if you supply the `--krakenuniq_save_reads` and/or `--krakenuniq_save_readclassification` parameters to the pipeline.
+
+:::info
+The output system of KrakenUniq can result in other `stdout` or `stderr` logging information being saved in the report file, therefore you must check your report files before downstream use!
+:::
+
+### Centrifuge
+
+[Centrifuge](https://github.com/DaehwanKimLab/centrifuge) is a taxonomic sequence classifier that uses a Burrows-Wheeler transform and Ferragina-Manzini index for storing and mapping sequences.
+
+
+Output files
+
+- `centrifuge/`
+  - `<db_name>/`
+    - `<sample_id>.centrifuge.mapped.fastq.gz`: FASTQ files containing all mapped reads
+    - `<sample_id>.centrifuge.report.txt`: A classification report that summarises the taxonomic ID, the taxonomic rank, length of genome sequence, number of classified and uniquely classified reads
+    - `<sample_id>.centrifuge.results.txt`: A file that summarises the classification assignment for a read, i.e. read ID, sequence ID, score for the classification, score for the next best classification, number of classifications for this read
+    - `<sample_id>.centrifuge.txt`: A Kraken2-style report that summarises the fraction abundance, taxonomic ID, number of k-mers, taxonomic path of all the hits in the centrifuge run for a given sample
+    - `<sample_id>.centrifuge.unmapped.fastq.gz`: FASTQ file containing all unmapped reads
+
+
+
+The main taxonomic classification files from Centrifuge are the `_combined_reports.txt`, `*report.txt`, `*results.txt` and the `*centrifuge.txt`. The latter is used by the taxpasta step. You will only receive the `.fastq` files if you supply `--centrifuge_save_reads`.
+
+### Kaiju
+
+[Kaiju](https://github.com/bioinformatics-centre/kaiju) is a taxonomic classifier that finds maximum exact matches on the protein-level using the Burrows-Wheeler transform.
+
+
+Output files
+
+- `kaiju/`
+  - `kaiju_<db_name>_combined_reports.txt`: A combined profile of all samples aligned to a given database (as generated by `kaiju2table`)
+  - `<db_name>/`
+    - `<sample_id>_<db_name>.kaiju.tsv`: Raw output from Kaiju with taxonomic rank, read ID and taxonomic ID
+    - `<sample_id>_<db_name>.kaijutable.txt`: Summarised Kaiju output with fraction abundance, taxonomic ID, number of reads, and taxonomic names (as generated by `kaiju2table`)
+
+
+
+The most useful summary file is the `*_combined_reports.txt` file, which summarises hits across all reads and samples. Separate per-sample summaries can be seen in `<db_name>/*.txt`. However, if you wish to look at more precise information on a per-read basis, see the `*.tsv` file. The default taxonomic rank is `species`. You can provide a different one by updating the argument `--kaiju_taxon_rank`.
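+
+For example, to summarise Kaiju hits at genus rather than species level, you could supply something like the following (a sketch with placeholder paths; `genus` assumes your database supports this rank):
+
+```bash
+nextflow run nf-core/taxprofiler -profile docker \
+  --input samplesheet.csv --databases databases.csv --outdir ./results \
+  --run_kaiju --kaiju_taxon_rank genus
+```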
+
+### DIAMOND
+
+[DIAMOND](https://github.com/bbuchfink/diamond) is a sequence aligner for translated DNA searches or protein sequences against a protein reference database such as NR. It is a replacement for the NCBI BLAST software tools. It has many key features and is used as a taxonomic classifier in nf-core/taxprofiler.
+
+
+Output files
+
+- `diamond/`
+  - `<db_name>/`
+    - `<sample_id>.log`: A log file containing stdout information
+ - `*.{blast,xml,txt,daa,sam,tsv,paf}`: A file containing alignment information in various formats, or taxonomic information in a text-based format. Exact output depends on user choice.
+
+
+
+By default you will receive a TSV output. Alternatively, you will receive a `*.sam` file if you provide the parameter `--diamond_save_reads`, but in this case no taxonomic classification will be available(!) - only the aligned reads in SAM format.
+
+:::info
+DIAMOND has many output formats, so depending on your [choice](https://github.com/bbuchfink/diamond/wiki/3.-Command-line-options) with `--diamond_output_format` you will receive the taxonomic information in a different format (see the sketch below this info box).
+:::
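+
+A sketch of requesting SAM output instead of the default TSV (paths are placeholders, and `sam` assumes the accepted values match the output list above; note the caveat that taxonomic output is then lost):
+
+```bash
+nextflow run nf-core/taxprofiler -profile docker \
+  --input samplesheet.csv --databases databases.csv --outdir ./results \
+  --run_diamond --diamond_output_format sam
+```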
+
+### MALT
+
+[MALT](https://software-ab.cs.uni-tuebingen.de/download/malt) is a fast replacement for BLASTX, BLASTP and BLASTN, and provides both local and semi-global alignment capabilities.
+
+
+Output files
+
+- `malt/`
+  - `<db_name>/`
+    - `<sample_id>.blastn.sam`: sparse SAM file containing alignments of each hit
+    - `<sample_id>.megan`: summary file that can be loaded into the [MEGAN6](https://uni-tuebingen.de/fakultaeten/mathematisch-naturwissenschaftliche-fakultaet/fachbereiche/informatik/lehrstuehle/algorithms-in-bioinformatics/software/megan6/) interactive viewer. Generated by MEGAN6 companion tool `rma2info`
+    - `<sample_id>.rma6`: binary file containing all alignments and taxonomic information of hits that can be loaded into the [MEGAN6](https://uni-tuebingen.de/fakultaeten/mathematisch-naturwissenschaftliche-fakultaet/fachbereiche/informatik/lehrstuehle/algorithms-in-bioinformatics/software/megan6/) interactive viewer
+    - `<sample_id>.txt.gz`: text file containing taxonomic IDs and read counts against each taxon. Generated by MEGAN6 companion tool `rma2info`
+
+
+
+The main output of MALT is the `.rma6` file format, which can only be loaded into MEGAN and its related tools. We provide the `rma2info` text files for improved compatibility with spreadsheet programs and other programmatic data manipulation tools, however these have only limited information compared to the 'binary' RMA6 file format (the `.txt` file only contains the taxonomic ID and count, whereas RMA6 has taxonomic lineage information).
+
+You will only receive the `.sam` and `.megan` files if you supply `--malt_save_reads` and/or `--malt_generate_megansummary` parameters to the pipeline.
+
+### MetaPhlAn
+
+[MetaPhlAn](https://github.com/biobakery/metaphlan) is a computational tool for profiling the composition of microbial communities (Bacteria, Archaea and Eukaryotes) from metagenomic shotgun sequencing data (i.e. not 16S) with species-level resolution via marker genes.
+
+
+Output files
+
+- `metaphlan/`
+  - `metaphlan_<db_name>_combined_reports.txt`: A combined profile of all samples aligned to a given database (as generated by `metaphlan_merge_tables`)
+  - `<db_name>/`
+    - `<sample_id>.biom`: taxonomic profile in BIOM format
+    - `<sample_id>.bowtie2out.txt`: BowTie2 alignment information (can be re-used for skipping alignment when re-running MetaPhlAn with different parameters)
+    - `<sample_id>_profile.txt`: MetaPhlAn taxonomic profile including abundance estimates
+
+
+
+The output contains a file named `*_combined_reports.txt`, which provides an overview of the classification results for all samples. The main taxonomic profiling file from MetaPhlAn is the `*_profile.txt` file. This provides the abundance estimates from MetaPhlAn; however, it does not include raw counts by default. Additionally, the output contains the intermediate Bowtie2 file `.bowtie2out.txt`, which presents a condensed representation of the mapping results of your sequencing reads against MetaPhlAn's marker gene sequences. The alignments are listed in tab-separated columns, including read ID and marker gene ID, with each alignment represented on a separate line.
+
+### mOTUs
+
+[mOTUs](https://github.com/motu-tool/mOTUs) is a taxonomic profiler that maps reads to a unique marker-specific database and estimates the relative abundance of known and unknown species.
+
+
+Output files
+
+- `motus/`
+  - `motus_<db_name>_combined_reports.txt`: A combined profile of all samples aligned to a given database (as generated by `motus_merge`)
+  - `<db_name>/`
+    - `<sample_id>.log`: A log file that contains summary statistics
+    - `<sample_id>.out`: A classification file that summarises taxonomic identifiers, by default at the rank of mOTUs (i.e., species level), and their relative abundances in the profiled sample.
+
+
+
+Normally `*_combined_reports.txt` is the most useful file for downstream analyses, but the per-sample `.out` file can provide additional, more specific information. By default, nf-core/taxprofiler provides a column describing the NCBI taxonomic ID, as this is used in the taxpasta step. You can disable this column by activating the argument `--motus_remove_ncbi_ids`.
+You will receive the relative abundance instead of read counts if you provide the argument `--motus_use_relative_abundance`.
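+
+For example (a sketch with placeholder paths), a mOTUs run reporting relative abundances without NCBI taxonomy IDs could be:
+
+```bash
+nextflow run nf-core/taxprofiler -profile docker \
+  --input samplesheet.csv --databases databases.csv --outdir ./results \
+  --run_motus \
+  --motus_use_relative_abundance --motus_remove_ncbi_ids
+```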
+
+### KMCP
+
+[KMCP](https://github.com/shenwei356/kmcp) utilises genome coverage information by splitting the reference genomes into chunks and stores k-mers in a modified and optimised COBS index for fast alignment-free sequence searching. KMCP combines k-mer similarity and genome coverage information to reduce the false positive rate of k-mer-based taxonomic classification and profiling methods.
+
+
+Output files
+
+- `kmcp/`
+
+  - `<db_name>/`
+    - `<sample_id>.gz`: output of `kmcp_search` containing search sequences against a database in tab-delimited format with 15 columns.
+    - `<sample_id>_kmcp.profile`: output of `kmcp_profile` containing the taxonomic profile from search results.
+
+
+
+You will receive the `.gz` file if you supply `--kmcp_save_search`. Please note that there is no taxonomic label assignment in this output file.
+
+The main taxonomic classification file from KMCP is the `*kmcp.profile` which is also used by the taxpasta step.
+
+### ganon
+
+[ganon](https://pirovc.github.io/ganon/) is designed to index large sets of genomic reference sequences and to classify reads against them efficiently. The tool uses Interleaved Bloom Filters as indices based on k-mers/minimizers. It was mainly developed for, but is not limited to, the metagenomics classification problem: quickly assigning sequence fragments to their closest reference among thousands of references. After classification, taxonomic abundance is estimated and reported.
+
+
+Output files
+
+- `ganon/`
+
+  - `<db_name>/`
+
+    - `<sample_id>_report.tre`: output of `ganon report` containing taxonomic classifications with possible formatting and/or filtering depending on options specified.
+    - `<sample_id>.tre`: output of `ganon classify` containing raw taxonomic classifications and abundance estimations with no additional formatting or filtering.
+    - `<sample_id>.rep`: 'raw' report of counts against each taxon.
+    - `<sample_id>.all`: per-read summary of all hits of each read.
+    - `<sample_id>.lca`: per-read summary of the best single hit after LCA for each read.
+    - `<sample_id>.unc`: list of read IDs with no hits.
+    - `<sample_id>.log`: the stdout console messages printed by `ganon classify`, containing some classification summary information
+
+  - `ganon_<db_name>_combined_reports.txt`: A combined profile of all samples aligned to a given database (as generated by `ganon table`)
+
+
+
+Generally you will want to refer to the `combined_reports.txt` or `<sample_id>_report.tre` files. For further descriptions of the contents of each file, see the [ganon documentation](https://pirovc.github.io/ganon/outputfiles/).
+
+You will only receive the `.all`, `.lca`, and `.unc` files if you supply the `--ganon_save_readclassifications` parameter to the pipeline.
+
+### Krona
+
+[Krona](https://github.com/marbl/Krona) allows the exploration of (metagenomic) hierarchical data with interactive zooming and multi-layered pie charts.
+
+Krona charts will be generated by the pipeline for supported tools (Kraken2, Centrifuge, Kaiju, and MALT).
+
+
+Output files
+
+- `krona/`
+  - `<tool>_<db_name>.html`: per-tool/per-database interactive HTML file containing hierarchical pie charts
+
+
+
+The resulting HTML files can be loaded into your web browser for exploration. Each file will have a dropdown to allow you to switch between each sample aligned against the given database of the tool.
+
+### TAXPASTA
+
+[TAXPASTA](https://github.com/taxprofiler/taxpasta) standardises and optionally merges two or more taxonomic profiles across samples into one single table. It supports multiple different classifiers, simplifying comparison of taxonomic classification results between tools and databases.
+
+
+Output files
+
+- `taxpasta/`
+
+  - `<tool>_<db_name>.{tsv,csv,arrow,parquet,biom}`: Standardised taxon table containing multiple samples. The default format is `tsv`.
+    - The first column describes the taxonomy ID and the rest of the columns describe the read counts for each sample.
+    - Note that the file naming scheme will apply regardless of whether `TAXPASTA_MERGE` (multiple sample run) or `TAXPASTA_STANDARDISE` (single sample run) are executed.
+    - If you have also run Bracken, the initial Kraken report (i.e., _before_ read re-assignment) will also be included in this directory with `-bracken` suffixed to your Bracken database name. For example: `kraken2-<db_name>-bracken.tsv`. However in most cases you want to use the actual Bracken file (i.e., `bracken_<db_name>.tsv`).
+
+
+
+By providing the path to a directory containing taxdump files to `--taxpasta_taxonomy_dir`, the taxon name, the taxon rank, the taxon's entire lineage including taxon names, and/or the taxon's entire lineage including taxon identifiers can also be added to the output in addition to just the taxon ID. Addition of this extra information can be turned on by using the parameters `--taxpasta_add_name`, `--taxpasta_add_rank`, `--taxpasta_add_lineage` and `--taxpasta_add_idlineage` respectively.
+
+These files will likely be the most useful for comparing differences in classification between different tools or for building consensuses, with the caveat that they contain slightly less information than the actual output from each tool (which may have non-standard information, e.g. taxonomic rank, percentage of hits, abundance estimations).
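+
+A sketch of enriching the standardised tables with taxon names and ranks, assuming you have a local directory of NCBI taxdump files (all paths are placeholders):
+
+```bash
+nextflow run nf-core/taxprofiler -profile docker \
+  --input samplesheet.csv --databases databases.csv --outdir ./results \
+  --run_kraken2 \
+  --taxpasta_taxonomy_dir /path/to/taxdump \
+  --taxpasta_add_name --taxpasta_add_rank
+```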
+
+The following report files are used for the taxpasta step:
+
+- Bracken: `<sample_id>_<db_name>.tsv`. Taxpasta uses the `new_est_reads` column for the standardised profile.
+- Centrifuge: `<sample_id>.centrifuge.txt`. Taxpasta uses the `direct_assigned_reads` column for the standardised profile.
+- DIAMOND: the default `.tsv` output. Taxpasta summarises the number of reads per NCBI taxonomy ID for the standardised profile.
+- Kaiju: `<sample_id>_<db_name>.kaijutable.txt`. Taxpasta uses the `reads` column from kaiju2table for the standardised profile.
+- KrakenUniq: `<sample_id>_<db_name>.report.txt`. Taxpasta uses the `reads` column for the standardised profile.
+- Kraken2: `<sample_id>_<db_name>.report.txt`. Taxpasta uses the `direct_assigned_reads` column for the standardised profile.
+- MALT: `<sample_id>.txt.gz`. Taxpasta uses the `count` (second) column from the output of MEGAN6's rma2info for the standardised profile.
+- MetaPhlAn: `<sample_id>_profile.txt`. Taxpasta uses the `relative_abundance` column multiplied with a fixed number to yield an integer for the standardised profile.
+- mOTUs: `<sample_id>.out`. Taxpasta uses the `read_count` column for the standardised profile.
+
+:::warning
+Please be aware that the outputs of each tool's standardised profile _may not_ be directly comparable between tools. Some may report raw read counts, whereas others may report abundance information. Please always refer to the list above for which information is used for each tool.
+:::
+
### MultiQC
@@ -55,6 +623,32 @@ The FastQC plots displayed in the MultiQC report shows _untrimmed_ reads. They m
Results generated by MultiQC collate pipeline QC from supported tools e.g. FastQC. The pipeline has special steps which also allow the software versions to be reported in the MultiQC output for future traceability. For more information about how to use MultiQC reports, see <http://multiqc.info>.
+All tools in taxprofiler supported by MultiQC will have a dedicated section showing summary statistics of each tool based on information stored in log files.
+
+You can expect either sections and/or general stats columns in the MultiQC reports for the following tools:
+
+- fastqc
+- adapterRemoval
+- fastp
+- bbduk
+- prinseqplusplus
+- porechop
+- filtlong
+- bowtie2
+- minimap2
+- samtools (stats)
+- kraken
+- bracken
+- centrifuge
+- kaiju
+- diamond
+- malt
+- motus
+
+:::info
+The 'General Stats' table by default will only show statistics referring to pre-processing steps, and will not display possible values from each classifier/profiler, unless turned on by the user within the 'Configure Columns' menu or via a custom MultiQC config file (`--multiqc_config`)
+:::
+
### Pipeline information
diff --git a/docs/usage.md b/docs/usage.md
index 286da89c..dc398350 100644
--- a/docs/usage.md
+++ b/docs/usage.md
@@ -6,62 +6,178 @@
## Introduction
-
+nf-core/taxprofiler is a pipeline for highly-parallelised taxonomic classification and profiling of shotgun metagenomic data across multiple tools simultaneously. In addition to running multiple classification and profiling tools at the same time, it allows you to perform taxonomic classification and profiling across multiple databases and settings per tool, and produces standardised output tables to allow immediate cross-comparison of results between tools.
-## Samplesheet input
+In addition to this page, you can find additional usage information on the following pages:
-You will need to create a samplesheet with information about the samples you would like to analyse before running the pipeline. Use this parameter to specify its location. It has to be a comma-separated file with 3 columns, and a header row as shown in the examples below.
+- [Tutorials](usage/tutorials.md)
+- [FAQ and Troubleshooting](usage/faq-troubleshooting.md)
-```bash
---input '[path to samplesheet file]'
+## General Usage
+
+To run nf-core/taxprofiler, at a minimum you require two inputs:
+
+- a sequencing read samplesheet
+- a database samplesheet
+
+Both contain metadata and paths to the data of your input samples and databases.
+
+When running nf-core/taxprofiler, every step and tool is 'opt in'. To run a given classifier or profiler you must make sure to supply both a database in your `.csv` and the corresponding `--run_<tool>` flag in your command. Omitting either will result in the profiling tool not executing.
+
+nf-core/taxprofiler also includes optional pre-processing (adapter clipping, run merging, etc.) and post-processing (visualisation) steps. These are also opt in, via a `--perform_<step>` flag. In some cases, the pre- and post-processing steps may also require additional files. Please check the parameters tab of this documentation for more information.
+
+Please see the rest of this page for information about how to prepare input samplesheets and databases and how to run Nextflow pipelines. See the [parameters](https://nf-co.re/taxprofiler/parameters) documentation for more information about specific options the pipeline also offers.
+
+## Samplesheet inputs
+
+nf-core/taxprofiler can accept as input raw or preprocessed single- or paired-end short-read (e.g. Illumina) FASTQ files, long-read FASTQ files (e.g. Oxford Nanopore), or FASTA sequences (available for a subset of profilers).
+
+You will need to create a samplesheet with information about the samples you would like to analyse before running the pipeline. Use this parameter to specify its location. It has to be a comma-separated file with 6 columns, and a header row as shown in the examples below. Furthermore, nf-core/taxprofiler also requires a second comma-separated file of 4 columns with a header row as in the examples below.
+
+This samplesheet is then specified on the command line as follows:
+
+```console
+--input '[path to samplesheet file]' --databases '[path to database sheet file]'
```
### Multiple runs of the same sample
-The `sample` identifiers have to be the same when you have re-sequenced the same sample more than once e.g. to increase sequencing depth. The pipeline will concatenate the raw reads before performing any downstream analysis. Below is an example for the same sample sequenced across 3 lanes:
+The `sample` identifiers have to be the same when you have re-sequenced the same sample more than once e.g. to increase sequencing depth. The pipeline will concatenate the FASTQ files of different runs of the same sample before performing profiling, when `--perform_runmerging` is supplied. Below is an example for the same sample sequenced across 3 lanes:
```csv title="samplesheet.csv"
-sample,fastq_1,fastq_2
-CONTROL_REP1,AEG588A1_S1_L002_R1_001.fastq.gz,AEG588A1_S1_L002_R2_001.fastq.gz
-CONTROL_REP1,AEG588A1_S1_L003_R1_001.fastq.gz,AEG588A1_S1_L003_R2_001.fastq.gz
-CONTROL_REP1,AEG588A1_S1_L004_R1_001.fastq.gz,AEG588A1_S1_L004_R2_001.fastq.gz
+sample,run_accession,instrument_platform,fastq_1,fastq_2,fasta
+2612,run1,ILLUMINA,2612_run1_R1.fq.gz,,
+2612,run2,ILLUMINA,2612_run2_R1.fq.gz,,
+2612,run3,ILLUMINA,2612_run3_R1.fq.gz,2612_run3_R2.fq.gz,
```
+:::warning
+Runs of the same sample sequenced on Illumina platforms with a combination of single and paired-end data will **not** be run-wise concatenated, unless pair-merging is specified. In the example above, `run3` will be profiled independently of `run1` and `run2` if pairs are not merged.
+:::
+
### Full samplesheet
-The pipeline will auto-detect whether a sample is single- or paired-end using the information provided in the samplesheet. The samplesheet can have as many columns as you desire, however, there is a strict requirement for the first 3 columns to match those defined in the table below.
+The pipeline will auto-detect whether a sample is single- or paired-end using the information provided in the samplesheet. The samplesheet can have as many columns as you desire, however, there is a strict requirement for the first 6 columns to match those defined in the table below.
-A final samplesheet file consisting of both single- and paired-end data may look something like the one below. This is for 6 samples, where `TREATMENT_REP3` has been sequenced twice.
+A final samplesheet file consisting of both single- and paired-end data, as well as FASTA files, may look something like the one below. This is for 4 samples, where `2612` has been sequenced twice.
```csv title="samplesheet.csv"
-sample,fastq_1,fastq_2
-CONTROL_REP1,AEG588A1_S1_L002_R1_001.fastq.gz,AEG588A1_S1_L002_R2_001.fastq.gz
-CONTROL_REP2,AEG588A2_S2_L002_R1_001.fastq.gz,AEG588A2_S2_L002_R2_001.fastq.gz
-CONTROL_REP3,AEG588A3_S3_L002_R1_001.fastq.gz,AEG588A3_S3_L002_R2_001.fastq.gz
-TREATMENT_REP1,AEG588A4_S4_L003_R1_001.fastq.gz,
-TREATMENT_REP2,AEG588A5_S5_L003_R1_001.fastq.gz,
-TREATMENT_REP3,AEG588A6_S6_L003_R1_001.fastq.gz,
-TREATMENT_REP3,AEG588A6_S6_L004_R1_001.fastq.gz,
+sample,run_accession,instrument_platform,fastq_1,fastq_2,fasta
+2611,ERR5766174,ILLUMINA,,,/<path>/<to>/fasta/ERX5474930_ERR5766174_1.fa.gz
+2612,ERR5766176,ILLUMINA,/<path>/<to>/fastq/ERX5474932_ERR5766176_1.fastq.gz,/<path>/<to>/fastq/ERX5474932_ERR5766176_2.fastq.gz,
+2612,ERR5766180,ILLUMINA,/<path>/<to>/fastq/ERX5474936_ERR5766180_1.fastq.gz,,
+2613,ERR5766181,ILLUMINA,/<path>/<to>/fastq/ERX5474937_ERR5766181_1.fastq.gz,/<path>/<to>/fastq/ERX5474937_ERR5766181_2.fastq.gz,
+ERR3201952,ERR3201952,OXFORD_NANOPORE,/<path>/<to>/fastq/ERR3201952.fastq.gz,,
```
-| Column | Description |
-| --------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `sample` | Custom sample name. This entry will be identical for multiple sequencing libraries/runs from the same sample. Spaces in sample names are automatically converted to underscores (`_`). |
-| `fastq_1` | Full path to FastQ file for Illumina short reads 1. File has to be gzipped and have the extension ".fastq.gz" or ".fq.gz". |
-| `fastq_2` | Full path to FastQ file for Illumina short reads 2. File has to be gzipped and have the extension ".fastq.gz" or ".fq.gz". |
+:::warning
+Input FASTQ and FASTA files _must_ be gzipped.
+:::
+
+:::warning
+While one can include both short-read and long-read data in one run, we recommend that you split these across _two_ pipeline runs and database sheets (see below). This will allow classification optimisation for each data type, and make MultiQC run-reports more readable (due to the very large differences in run statistics between the two data types).
+:::
+
+| Column | Description |
+| --------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `sample` | Unique sample name [required]. |
+| `run_accession`       | Run ID or name unique for each (pair of) file(s). You can also supply the sample name again here, if only a single run was generated [required].                                                           |
+| `instrument_platform` | Sequencing platform reads generated on, selected from the EBI ENA [controlled vocabulary](https://www.ebi.ac.uk/ena/portal/api/controlledVocab?field=instrument_platform) [required]. |
+| `fastq_1`             | Path or URL to sequencing reads, or, for Illumina paired-end data, the R1 sequencing reads in FASTQ format. Gzipped files accepted. Can be left empty if data in FASTA format is specified instead. Cannot be combined with `fasta`. |
+| `fastq_2`             | Path or URL to Illumina R2 sequencing reads in FASTQ format. Gzipped files accepted. Can be left empty if single-end data. Cannot be combined with `fasta`.                                               |
+| `fasta`               | Path or URL to long reads or contigs in FASTA format. Gzipped files accepted. Can be left empty if data in FASTQ format is specified instead. Cannot be combined with `fastq_1` or `fastq_2`.             |
An [example samplesheet](../assets/samplesheet.csv) has been provided with the pipeline.
+### Full database sheet
+
+nf-core/taxprofiler supports multiple databases being classified/profiled against in parallel for each tool.
+
+Databases can be supplied either in the form of a compressed `.tar.gz` archive of a directory containing all relevant database files or the path to a directory on the filesystem.
+
+:::warning
+nf-core/taxprofiler does not provide any databases by default, nor does it currently generate them for you. This must be performed manually by the user. See the bottom of this section for more information on the expected database files, or the [building custom database](usage/tutorials#retrieving-databases-or-building-custom-databases) tutorials.
+:::
+
+The pipeline takes the paths to these databases, and the specific classification/profiling parameters to use with them, as input via a four-column comma-separated sheet.
+
+:::warning
+To allow user freedom, nf-core/taxprofiler does not check for mandatory parameters, nor the validity of non-file database parameters, for correct execution of the tool - excluding options offered via pipeline-level parameters! Please validate your database parameters (cross-referencing the [parameters](https://nf-co.re/taxprofiler/parameters) and the given tool documentation) before submitting the database sheet! For example, if you don't use the default read length, Bracken will require `-r <READLENGTH>` in the `db_params` column.
+:::
+
+An example database sheet can look as follows, where 9 tools are being used, and `malt` and `kraken2` will be used against two databases each.
+
+`kraken2` will be run twice even though it only has a single 'dedicated' database, because specifying `bracken` implies first running `kraken2` on the `bracken` database, as required by `bracken`.
+
+```csv
+tool,db_name,db_params,db_path
+malt,malt85,-id 85,/<path>/<to>/malt/testdb-malt/
+malt,malt95,-id 90,/<path>/<to>/malt/testdb-malt.tar.gz
+bracken,db1,;-r 150,/<path>/<to>/bracken/testdb-bracken.tar.gz
+kraken2,db2,--quick,/<path>/<to>/kraken2/testdb-kraken2.tar.gz
+krakenuniq,db3,,/<path>/<to>/krakenuniq/testdb-krakenuniq.tar.gz
+centrifuge,db1,,/<path>/<to>/centrifuge/minigut_cf.tar.gz
+metaphlan,db1,,/<path>/<to>/metaphlan/metaphlan_database/
+motus,db_mOTU,,/<path>/<to>/motus/motus_database/
+ganon,db1,,/<path>/<to>/ganon/test-db-ganon.tar.gz
+kmcp,db1,;-I 20,/<path>/<to>/kmcp/test-db-kmcp.tar.gz
+```
+
+:::warning
+For Bracken and KMCP, which are two-step profilers, nf-core/taxprofiler has a special way of passing parameters to each step!
+
+For Bracken, if you wish to supply any parameters to both the Kraken and Bracken steps, or just the Bracken step, you **must** use a _semi-colon_-separated (`;`) list in the `db_params` column. This allows you to specify the Kraken2 parameters before, and the Bracken parameters after, the `;`. This is particularly important if you supply a Bracken database with a non-default read length parameter. If you do not have any parameters to specify, you can leave this column empty. If you wish to provide settings to _just_ the Kraken2 step of the Bracken profiling, you can supply a normal string to the column without a semi-colon. If you wish to supply parameters to only Bracken (and keep default Kraken2 parameters), then you supply a string to the column starting with `;`, with the Bracken parameters _after_ it.
+
+Similarly, for KMCP, if you want to supply parameters to both the first step (KMCP search) and the second step (KMCP profile), you **must** use a _semi-colon_-separated (`;`) list in `db_params`, with the KMCP search parameters before, and the KMCP profile parameters after, the `;`. If you wish to provide parameters to just KMCP search, you do not need the `;`. If you want to supply parameters to just KMCP profile (and keep search parameters at default), then you must start the string with `;`, with the KMCP profile parameters coming _after_ the semi-colon. If you do not wish to modify any parameters, you can leave the column empty (i.e. the `;` is not necessary). A sketch of the Bracken variants is shown below this warning.
+:::
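+
+To make the semi-colon rule concrete, below is a sketch of Bracken rows built on the command line (the database path and read length are hypothetical example values):
+
+```bash
+# db_params: Kraken2 parameters go before the ';', Bracken parameters after it
+cat > databases.csv <<'EOF'
+tool,db_name,db_params,db_path
+bracken,db_kraken2_only,--quick,/path/to/testdb-bracken.tar.gz
+bracken,db_bracken_only,;-r 150,/path/to/testdb-bracken.tar.gz
+bracken,db_both_steps,--quick;-r 150,/path/to/testdb-bracken.tar.gz
+EOF
+```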
+
+Column specifications are as follows:
+
+| Column | Description |
+| ----------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `tool` | Taxonomic profiling tool (supported by nf-core/taxprofiler) that the database has been indexed for [required]. Please note that `bracken` also implies running `kraken2` on the same database. |
+| `db_name` | A unique name per tool for the particular database [required]. Please note that names need to be unique across both `kraken2` and `bracken` as well, even if re-using the same database. |
+| `db_params` | Any parameters that the given taxonomic classifier/profiler should use when classifying/profiling against this specific database. Can be empty to use the taxonomic classifier/profiler defaults. Must not be surrounded by quotes [required]. We generally do not recommend specifying parameters here that turn on/off saving of output files or specify particular file extensions - this should already be addressed via pipeline parameters. For Bracken databases, this must at a minimum contain a `;` separating Kraken2 from Bracken parameters. |
+| `db_path` | Path to the database. Can either be a path to a directory containing the database index files or a `.tar.gz` file which contains the compressed database directory with the same name as the tar archive, minus `.tar.gz` [required]. |
+
+:::tip
+You can also specify the same database directory/file twice (ensuring unique `db_name`s) and specify different parameters for each database to compare the effect of different parameters during classification/profiling.
+:::
+
+nf-core/taxprofiler will automatically decompress and extract any compressed archives for you.
+
+The (uncompressed) database paths (`db_path`) for each tool are expected to contain:
+
+- [**Bracken**:](usage/tutorials.md#bracken-custom-database) output of the combined `kraken2-build` and `bracken-build` process.
+- [**Centrifuge**:](usage/tutorials.md#centrifuge-custom-database) output of `centrifuge-build`.
+- [**DIAMOND**:](usage/tutorials.md#diamond-custom-database) output of `diamond makedb`.
+- [**Kaiju**:](usage/tutorials.md#kaiju-custom-database) output of `kaiju-makedb`.
+- [**Kraken2**:](usage/tutorials.md#kraken2-custom-database) output of `kraken2-build` command(s).
+- [**KrakenUniq**:](usage/tutorials.md#krakenuniq-custom-database) output of `krakenuniq-build` command(s).
+- [**MALT**](usage/tutorials.md#malt-custom-database) output of `malt-build`.
+- [**MetaPhlAn**:](usage/tutorials.md#metaphlan-custom-database) output of `metaphlan --install` or downloaded from links on the [MetaPhlAn wiki](https://github.com/biobakery/MetaPhlAn/wiki/MetaPhlAn-4#customizing-the-database).
+- [**mOTUs**:](usage/tutorials.md#motus-custom-database) the directory `db_mOTU/` that is downloaded via `motus downloadDB`.
+- [**ganon**:](usage/tutorials.md#ganon-custom-database) output of `ganon build` or `ganon build-custom`.
+- [**KMCP**:](usage/tutorials.md#kmcp-custom-database) output of `kmcp index`. Note: `kmcp index` uses the output of an upstream `kmcp compute` step.
+
+:::info
+Click the links in the list above for short quick-reference tutorials on how to generate custom databases for each tool.
+:::
+
## Running the pipeline
The typical command for running the pipeline is as follows:
```bash
-nextflow run nf-core/taxprofiler --input ./samplesheet.csv --outdir ./results --genome GRCh37 -profile docker
+nextflow run nf-core/taxprofiler --input samplesheet.csv --databases databases.csv --outdir <OUTDIR> -profile docker --run_<TOOL1> --run_<TOOL2>
```
This will launch the pipeline with the `docker` configuration profile. See below for more information about profiles.
+When running nf-core/taxprofiler, every step and tool is 'opt in'. To run a given classifier/profiler you must make sure to supply both a database in your `.csv` and the corresponding `--run_<tool>` flag in your command. Omitting either will result in the classification/profiling tool not executing. If you wish to perform pre-processing (adapter clipping, run merging, etc.) or post-processing (visualisation) steps, these are also opt in, via a `--perform_<step>` flag. In some cases, the pre- and post-processing steps may also require additional files. Please check the parameters tab of this documentation for more information.
+
Note that the pipeline will create the following files in your working directory:
```bash
@@ -96,6 +212,185 @@ genome: 'GRCh37'
You can also generate such `YAML`/`JSON` files via [nf-core/launch](https://nf-co.re/launch).
+### Sequencing quality control
+
+[`FastQC`](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/) gives general quality metrics about your reads. It provides information about the quality score distribution across your reads, per base sequence content (%A/T/G/C), adapter contamination and overrepresented sequences. nf-core/taxprofiler offers [`falco`](https://github.com/smithlabcode/falco) as a drop-in replacement, with reportedly improved performance, particularly for long reads.
+
+### Preprocessing Steps
+
+nf-core/taxprofiler offers four main steps for preprocessing raw sequencing reads:
+
+- [**Read processing**](#read-processing): adapter clipping and pair-merging.
+- [**Complexity filtering**](#complexity-filtering): removal of low-sequence complexity reads.
+- [**Host read-removal**](#host-read-removal): removal of reads aligning to reference genome(s) of a host.
+- [**Run merging**](#run-merging): concatenation of multiple FASTQ chunks/sequencing runs/libraries of a sample.
+
+:::info
+You can save the 'final' reads used for classification/profiling from any combination of these steps with `--save_analysis_ready_reads`.
+:::
+
+#### Read Processing
+
+Raw sequencing read processing in the form of adapter clipping and paired-end read merging can be activated via the `--perform_shortread_qc` or `--perform_longread_qc` flags.
+
+It is highly recommended to run this on raw reads to remove artifacts from sequencing that can cause false positive identification of taxa (e.g. contaminated reference genomes) and/or skews in taxonomic abundance profiles. If you have public data, these artifacts should normally already have been removed, however you should still check that this was indeed done.
+
+There are currently two options for short-read preprocessing: [`fastp`](https://github.com/OpenGene/fastp) or [`adapterremoval`](https://github.com/MikkelSchubert/adapterremoval).
+
+For adapter clipping, you can either rely on the tool's default adapter sequences, or supply your own adapters (`--shortread_qc_adapter1` and `--shortread_qc_adapter2`).
+By default, paired-end merging is not activated. In this case, paired-end 'alignment' against the reference databases is performed where supported, and where it is not supported, pairs will be independently classified/profiled. If paired-end merging is activated, you can also specify whether to include unmerged reads in the reads sent for classification/profiling (`--shortread_qc_mergepairs` and `--shortread_qc_includeunmerged`).
+You can also turn off clipping and only perform paired-end merging, if requested. This can be useful when processing data downloaded from the ENA, SRA, or DDBJ (`--shortread_qc_skipadaptertrim`).
+Both tools support length filtering of reads, which can be tuned with `--shortread_qc_minlength`. Performing length filtering can be useful to remove short (often low sequencing complexity) sequences that result in unspecific classification and therefore slow down runtime during classification/profiling, with minimal gain.
+
+There is currently one option for long-read Oxford Nanopore processing: [`porechop`](https://github.com/rrwick/Porechop).
+
+For both short-read and long-read preprocessing, you can optionally save the resulting processed reads with `--save_preprocessed_reads`.
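+
+Putting these options together, a short-read preprocessing sketch with custom adapters and a length filter (the file paths, adapter sequences, and minimum length are example values only) could be:
+
+```bash
+nextflow run nf-core/taxprofiler -profile docker \
+  --input samplesheet.csv --databases databases.csv --outdir ./results \
+  --perform_shortread_qc \
+  --shortread_qc_adapter1 AGATCGGAAGAGC --shortread_qc_adapter2 AGATCGGAAGAGC \
+  --shortread_qc_minlength 35 \
+  --save_preprocessed_reads
+```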
+
+#### Complexity Filtering
+
+Complexity filtering can be activated via the `--perform_shortread_complexityfilter` flag.
+
+Complexity filtering is primarily a run-time optimisation step. It is not necessary for accurate taxonomic classification/profiling, however it can speed up run-time of each tool by removing reads with low-diversity of nucleotides (e.g. with mono-nucleotide - `AAAAAAAA`, or di-nucleotide repeats `GAGAGAGAGAGAGAG`) that have a low-chance of giving an informative taxonomic ID as they can be associated with many different taxa. Removing these reads therefore saves computational time and resources.
+
+There are currently three options for short-read complexity filtering: [`bbduk`](https://jgi.doe.gov/data-and-tools/software-tools/bbtools/bb-tools-user-guide/bbduk-guide/), [`prinseq++`](https://github.com/Adrian-Cantu/PRINSEQ-plus-plus), and [`fastp`](https://github.com/OpenGene/fastp#low-complexity-filter).
+
+There is one option for long-read quality filtering: [`Filtlong`](https://github.com/rrwick/Filtlong).
+
+The tools offer different algorithms and parameters for removing low complexity reads and quality filtering. We therefore recommend reviewing the pipeline's [parameter documentation](https://nf-co.re/taxprofiler/parameters) and the documentation of the tools (see links above) to decide on optimal methods and parameters for your dataset.
+
+You can optionally save the FASTQ output of the complexity filtering with `--save_complexityfiltered_reads`. If running with `fastp`, complexity filtering happens within the earlier short-read preprocessing step. Therefore there will not be an independent pipeline step for complexity filtering, and no independent FASTQ file (i.e. `--save_complexityfiltered_reads` will be ignored) - your complexity-filtered reads will instead be in the `fastp/` folder, in the same file(s) as the preprocessed reads.
+
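+For example, complexity filtering with bbduk might be enabled as follows (a sketch: the tool-selection flag `--shortread_complexityfilter_tool` is assumed from the pipeline's parameter documentation):
+
+```bash
+nextflow run nf-core/taxprofiler -profile docker \
+  --input samplesheet.csv --databases database.csv --outdir ./results \
+  --perform_shortread_complexityfilter \
+  --shortread_complexityfilter_tool bbduk \
+  --save_complexityfiltered_reads \
+  --run_kraken2
+```
+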
+:::warning
+For nanopore data: we do not recommend performing any read preprocessing or complexity filtering if you are using ONT's Guppy toolkit for basecalling and post-processing.
+:::
+
+#### Host-Read Removal
+
+Removal of possible host reads from FASTQ files prior to classification/profiling can be activated with `--perform_shortread_hostremoval` or `--perform_longread_hostremoval`.
+
+Similarly to complexity filtering, host removal can be useful for runtime optimisation and the reduction of misclassified reads. It is not usually necessary to report the classification of reads from a host when you already know the host of the sample, so you can gain a run-time and computational advantage by removing these reads with efficient methods prior to the typically resource-heavy classification/profiling. Furthermore, particularly with human samples, you can reduce the number of false positives during classification/profiling that occur due to host-sequence contamination in reference genomes in public databases.
+
+nf-core/taxprofiler currently offers host-removal via alignment against a reference genome with Bowtie2 for short reads and minimap2 for long reads, and the use of the unaligned reads for downstream classification/profiling.
+
+You can supply your reference genome in FASTA format with `--hostremoval_reference`. You can also optionally supply a directory containing pre-indexed Bowtie2 index files with `--shortread_hostremoval_index`, or a minimap2 `.mmi` file with `--longread_hostremoval_index`; however, nf-core/taxprofiler will generate these for you if necessary. Pre-supplying the index directory or files can greatly speed up the process, and these can be re-used between runs.
+
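+If you wish to pre-build the indices yourself for re-use, the tools' standard indexing commands can be used (a sketch, with a hypothetical `reference.fna`):
+
+```bash
+# Pre-build a Bowtie2 index directory for --shortread_hostremoval_index
+mkdir bt2_index
+bowtie2-build reference.fna bt2_index/reference
+
+# Pre-build a minimap2 .mmi file for --longread_hostremoval_index
+minimap2 -d reference.mmi reference.fna
+```
+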
+:::tip
+If you have multiple taxa or sequences you wish to remove (e.g., the host genome and also PhiX, a common quality-control reagent during sequencing), you can simply concatenate the FASTAs of each taxon or sequence into a single reference file.
+:::
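+
+For example, to combine a host genome with the PhiX genome into a single reference for `--hostremoval_reference` (a sketch with hypothetical file names):
+
+```bash
+# Gzipped FASTAs can be concatenated directly into a single valid gzip file
+cat host_genome.fna.gz GCF_000819615.1_ViralProj14015_genomic.fna.gz > hostremoval_reference.fna.gz
+```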
+
+#### Run Merging
+
+For samples that may have been sequenced over multiple runs, or for FASTQ files split into multiple chunks, you can activate the ability to merge across all runs or chunks with `--perform_runmerging`.
+
+For more information on how to set up your input samplesheet, see [Multiple runs of the same sample](#multiple-runs-of-the-same-sample).
+
+Activating this functionality will concatenate the FASTQ files with the same sample name _after_ the optional preprocessing steps and _before_ classification/profiling. Note that libraries with runs of different pairing types will **not** be merged and this will be indicated on output files with a `_se` or `_pe` suffix to the sample name accordingly.
+
+You can optionally save the FASTQ output of the run merging with `--save_runmerged_reads`.
+
+#### Classification and Profiling
+
+The following sections provide tips and suggestions for running the different taxonomic classification and profiling tools _within the pipeline_. For advice and/or guidance on whether you should run a particular tool on your specific data, please see the documentation of each tool!
+
+An important distinction between the different tools included in the pipeline is classification versus profiling. Taxonomic _classification_ is concerned with simply detecting the presence of species in a given sample. Taxonomic _profiling_ additionally involves estimating the _abundance_ of each species.
+
+Note that not all taxonomic classification tools (e.g. Kraken, MALT, Kaiju) perform _profiling_, but all taxonomic profilers (e.g. MetaPhlAn, mOTUs, Bracken) must perform some form of _classification_ prior to profiling.
+
+For advice as to which tool to run in your context, please see the documentation of each tool.
+
+:::note
+If you would like to change this behaviour, please contact us on the [nf-core slack](https://nf-co.re/join) and we can discuss this.
+:::
+
+Not all tools currently have dedicated tips, suggestions and/or recommendations, however we welcome further contributions for existing and additional tools via pull requests to the [nf-core/taxprofiler repository](https://github.com/nf-core/taxprofiler)!
+
+##### Bracken
+
+To run Bracken in the pipeline, you must make sure to also activate Kraken2, as Bracken builds upon Kraken2 results.
+
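+In practice this means supplying both run flags together (a sketch; `--run_bracken` is assumed from the pipeline's parameter documentation, alongside the flags used elsewhere in this guide):
+
+```bash
+nextflow run nf-core/taxprofiler -profile docker \
+  --input samplesheet.csv --databases database.csv --outdir ./results \
+  --run_kraken2 --run_bracken
+```
+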
+It is unclear whether Bracken is suitable for long reads, as it makes certain assumptions about read lengths. Furthermore, during testing we found issues where Bracken would fail on the long-read test data.
+
+Therefore, nf-core/taxprofiler currently does not run Bracken on data specified as being sequenced with `OXFORD_NANOPORE` in the input samplesheet.
+
+##### Centrifuge
+
+Centrifuge currently does not accept FASTA files as input, therefore no output will be produced for these input files.
+
+##### DIAMOND
+
+DIAMOND only allows the output of a single file format at a time, therefore if parameters such as `--diamond_save_reads` are supplied, only aligned reads in SAM format will be produced and no taxonomic profiles will be available. Be aware of this when setting up your pipeline runs, depending on your particular use case.
+
+##### Kaiju
+
+Currently, no specific tips or suggestions.
+
+##### Kraken2
+
+Currently, no specific tips or suggestions.
+
+##### KrakenUniq
+
+Currently, no specific tips or suggestions.
+
+##### MALT
+
+MALT does not support paired-end read alignment (unlike other tools), therefore nf-core/taxprofiler aligns the mates as independent files if read merging is skipped. If you skip merging, you can sum or average the counts of the pairs in the results.
+
+Krona can only be run on MALT output if a path to a Krona taxonomy database is supplied with `--krona_taxonomy_directory`. Therefore, if you do not supply a Krona directory, Krona plots will not be produced for MALT.
+
+##### MetaPhlAn
+
+MetaPhlAn4 can be used with a MetaPhlAn3 database by adding `--mpa3` to the `db_params` column of the `database.csv`.
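+
+For example, a database sheet entry pointing MetaPhlAn4 at a MetaPhlAn3 database might look like the following (a sketch; the database name and path are placeholders):
+
+```csv title="database.csv"
+tool,db_name,db_params,db_path
+metaphlan,db_mpa3,--mpa3,/path/to/metaphlan3-db/
+```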
+
+##### mOTUs
+
+mOTUs currently does not accept FASTA files as input, therefore no output will be produced for these input files.
+
+##### ganon
+
+It is unclear whether ganon is suitable for long reads - during testing we found issues where ganon would fail on the long-read test data.
+
+Therefore, nf-core/taxprofiler currently does not run ganon on data specified as being sequenced with `OXFORD_NANOPORE` in the input samplesheet.
+
+##### KMCP
+
+KMCP is only suitable for short-read metagenomic profiling, with much lower sensitivity on long-read datasets. Therefore, nf-core/taxprofiler does not currently run KMCP on data specified as being sequenced with `OXFORD_NANOPORE` in the input samplesheet.
+
+#### Post Processing
+
+##### Visualisation
+
+nf-core/taxprofiler supports the generation of Krona interactive pie chart plots for the following compatible tools:
+
+- Kraken2
+- Centrifuge
+- Kaiju
+- MALT
+
+:::warning
+MALT Krona plots cannot be generated automatically; you must also specify a Krona taxonomy directory with `--krona_taxonomy_directory` if you wish to generate these.
+:::
+
+##### Multi-Table Generation
+
+The main multiple-sample table from nf-core/taxprofiler comes from a dedicated standalone tool originally developed for the pipeline - [Taxpasta](https://taxpasta.readthedocs.io/en/latest/). When providing `--run_profile_standardisation`, every classifier/profiler and database combination will get a standardised and (if present) multi-sample taxon table in the [`taxpasta/`](https://nf-co.re/taxprofiler/output) directory. These tables are structured in the same way, to facilitate comparison between the results of the different classifiers/profilers. If multiple samples are provided, `taxpasta merge` will be executed, whereas if only a single sample is provided, `taxpasta standardise` will be executed - the file naming scheme is the same for both.
+
+In addition to per-sample profiles and standardised Taxpasta output, the pipeline also supports generation of 'native' multi-sample taxonomic profiles (i.e., those generated by the taxonomic profiling tools themselves or additional utility scripts provided by the tool authors), when providing `--run_profile_standardisation` to your pipeline.
+
+These are generated at a per-database level, i.e., you will get one multi-sample taxon table for each database you provide for each tool, placed in the same directory as the directories containing the per-sample profiles.
+
+The following tools will produce multi-sample taxon tables:
+
+- **Bracken** (via bracken's `combine_bracken_outputs.py` script)
+- **Centrifuge** (via KrakenTools' `combine_kreports.py` script)
+- **Kaiju** (via Kaiju's `kaiju2table` tool)
+- **Kraken2** (via KrakenTools' `combine_kreports.py` script)
+- **MetaPhlAn** (via MetaPhlAn's `merge_metaphlan_tables.py` script)
+- **mOTUs** (via the `motus merge` command)
+- **ganon** (via the `ganon table` command)
+
+Note that the multi-sample tables from the 'native' tools in each folder are [not inter-operable](https://taxpasta.readthedocs.io/en/latest/tutorials/getting-started/) with each other, as they can have different formats and can contain additional and different data. In this case we recommend using the standardised and merged output from Taxpasta, as described above.
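+
+As a quick sanity check, you can inspect a standardised Taxpasta table directly on the command line (a sketch; the exact file name depends on your tool and database names):
+
+```bash
+# Peek at the first rows of a merged taxon table
+head results/taxpasta/kraken2_db1.tsv | column -t -s $'\t'
+```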
+
### Updating the pipeline
When you run the above command, Nextflow automatically pulls the pipeline code from GitHub and stores it as a cached version. When running the pipeline after this, it will always use the cached version if available - even if the pipeline has been updated since. To make sure that you're running the latest version of the pipeline, make sure that you regularly update the cached version of the pipeline:
@@ -112,7 +407,7 @@ First, go to the [nf-core/taxprofiler releases page](https://github.com/nf-core/
This version number will be logged in reports when you run the pipeline, so that you'll know what you used when you look back in the future. For example, at the bottom of the MultiQC reports.
-To further assist in reproducbility, you can use share and re-use [parameter files](#running-the-pipeline) to repeat pipeline runs with the same settings without having to write out a command with every single parameter.
+To further assist in reproducibility, you can share and re-use [parameter files](#running-the-pipeline) to repeat pipeline runs with the same settings without having to write out a command with every single parameter.
:::tip
If you wish to share such a parameter file (such as uploading it as supplementary material for academic publications), make sure NOT to include cluster-specific paths to files, nor institution-specific profiles.
@@ -139,7 +434,7 @@ The pipeline also dynamically loads configurations from [https://github.com/nf-c
Note that multiple profiles can be loaded, for example: `-profile test,docker` - the order of arguments is important!
They are loaded in sequence, so later profiles can overwrite earlier profiles.
-If `-profile` is not specified, the pipeline will run locally and expect all software to be installed and available on the `PATH`. This is _not_ recommended, since it can lead to different results on different machines dependent on the computer enviroment.
+If `-profile` is not specified, the pipeline will run locally and expect all software to be installed and available on the `PATH`. This is _not_ recommended, since it can lead to different results on different machines dependent on the computer environment.
- `test`
- A profile with a complete configuration for automated testing
diff --git a/docs/usage/faq-troubleshooting.md b/docs/usage/faq-troubleshooting.md
new file mode 100644
index 00000000..3c80725d
--- /dev/null
+++ b/docs/usage/faq-troubleshooting.md
@@ -0,0 +1,9 @@
+# Troubleshooting and FAQs
+
+## I get a warning during the centrifuge_kreport process with exit status 255
+
+When a sample has insufficient hits for abundance estimation, the resulting `report.txt` file will be empty.
+
+When trying to convert this to a kraken-style report, the conversion tool will exit with a status code `255`, and provide a `WARN`.
+
+This is _not_ an error nor a failure of the pipeline; it simply means your sample has no hits against the provided database when using Centrifuge.
diff --git a/docs/usage/tutorials.md b/docs/usage/tutorials.md
new file mode 100644
index 00000000..000736b5
--- /dev/null
+++ b/docs/usage/tutorials.md
@@ -0,0 +1,620 @@
+# nf-core/taxprofiler: Tutorials
+
+This page provides a range of tutorials to help give you a bit more guidance on how to set up nf-core/taxprofiler runs in the wild.
+
+## Simple Tutorial
+
+In this tutorial we will walk you through a simple setup of a small nf-core/taxprofiler run.
+It assumes that you have basic knowledge of metagenomic classification input and output files.
+
+### Preparation
+
+#### Hardware
+
+The datasets used should be small enough to run on your own laptop or a single server node.
+
+If you wish to use an HPC cluster or cloud, and don't wish to use an 'interactive' session submitted to your scheduler, please see the [nf-core documentation](https://nf-co.re/docs/usage/configuration#introduction) on how to make a relevant config file.
+
+You will need internet access and at least 1.5 GB of hard drive space.
+
+#### Software
+
+The tutorial assumes you are on a Unix-based operating system, and have already installed Nextflow as well as a software environment system such as [Conda](https://docs.conda.io/en/latest/miniconda.html), [Docker](https://www.docker.com/), or [Singularity/Apptainer](https://apptainer.org/).
+The tutorial will use Docker, however you can simply replace references to `docker` with `conda`, `singularity`, or `apptainer` accordingly.
+
+#### Data
+
+First we will make a directory to run the whole tutorial in.
+
+```bash
+mkdir taxprofiler-tutorial
+cd taxprofiler-tutorial/
+```
+
+We will use very small, pre-subset short-read metagenomes that are used for pipeline testing.
+nf-core/taxprofiler accepts FASTQ or FASTA files as input formats, however we will use FASTQ here as the more common format in taxonomic classification.
+You can download these metagenomes with the following command.
+
+```bash
+curl -O https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/data/fastq/ERX5474932_ERR5766176_1.fastq.gz
+curl -O https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/data/fastq/ERX5474932_ERR5766176_2.fastq.gz
+curl -O https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/data/fastq/ERX5474932_ERR5766176_B_1.fastq.gz
+curl -O https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/data/fastq/ERX5474932_ERR5766176_B_2.fastq.gz
+```
+
+In this tutorial we will demonstrate running with three different profilers, and in one of those cases, running the same database twice but with different parameters.
+The database consists of two genomes of species known to be present in the metagenomes.
+You can download the databases for Kraken2, Centrifuge, and Kaiju with the following commands.
+
+```bash
+curl -O https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/data/database/kraken2/testdb-kraken2.tar.gz
+curl -O https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/data/database/centrifuge/test-db-centrifuge.tar.gz
+curl -O https://raw.githubusercontent.com/nf-core/test-datasets/taxprofiler/data/database/kaiju/kaiju.tar.gz
+```
+
+To demonstrate that nf-core/taxprofiler can also accept databases as uncompressed folders, we can extract one of them.
+
+```bash
+tar -xzf kaiju.tar.gz
+```
+
+:::note
+You must provide these databases pre-built to the pipeline; nf-core/taxprofiler neither comes with default databases nor can it generate databases for you.
+For guidance on how to build databases, see the [Retrieving databases or building custom databases](#retrieving-databases-or-building-custom-databases) tutorial.
+:::
+
+Finally, an important step of any metagenomic classification is to remove contamination.
+Contamination can come from many places, typically from the host of a host-associated sample, however it can also come from the laboratory processing of samples.
+A common contaminant in Illumina sequencing is a spike-in control of the genome of PhiX virus, which we can download with the following command.
+
+```bash
+curl -O https://ftp.ncbi.nlm.nih.gov/genomes/all/GCF/000/819/615/GCF_000819615.1_ViralProj14015/GCF_000819615.1_ViralProj14015_genomic.fna.gz
+```
+
+### Preparing Input
+
+#### Sample sheet
+
+You provide the sequencing data FASTQ files to nf-core/taxprofiler via an input 'sample sheet' `.csv` file.
+This is a 6-column table that includes sample and library names, the instrument platform, and paths to the sequencing data.
+
+Open a text editor, and create a file called `samplesheet.csv`.
+Copy and paste the following lines into the file and save it.
+
+```csv title="samplesheet.csv"
+sample,run_accession,instrument_platform,fastq_1,fastq_2,fasta
+ERX5474932,ERR5766176,ILLUMINA,ERX5474932_ERR5766176_1.fastq.gz,ERX5474932_ERR5766176_2.fastq.gz,
+ERX5474932,ERR5766176_B,ILLUMINA,ERX5474932_ERR5766176_B_1.fastq.gz,ERX5474932_ERR5766176_B_2.fastq.gz,
+```
+
+Here we have specified two libraries of the same sample, that they were sequenced on an Illumina platform, and the paths to the FASTQ files.
+If you had placed your FASTQ files elsewhere, you would give the full path (i.e., with relevant directories) in the `fastq_1`, `fastq_2`, and `fasta` columns.
+
+#### Database sheet
+
+For the database(s), you also supply these via a `.csv` file.
+This 4-column table contains the tool the database has been built for, a database name, the parameters you wish reads to be queried against the given database with, and a path to a `.tar.gz` archive file or a directory containing the database files.
+
+Open a text editor, and create a file called `database.csv`.
+Copy and paste the following lines into the file and save it.
+
+```csv title="database.csv"
+tool,db_name,db_params,db_path
+kraken2,db1,--quick,testdb-kraken2.tar.gz
+centrifuge,db2,,test-db-centrifuge.tar.gz
+centrifuge,db2_trimmed,--trim5 2 --trim3 2,test-db-centrifuge.tar.gz
+kaiju,db3,,kaiju/
+```
+
+You can see here we have specified the Centrifuge database twice, to allow comparison of different settings.
+Note that each database of the same tool has a unique name.
+Furthermore, while the Kraken2 and Centrifuge databases have been supplied as `.tar.gz` archives, the Kaiju database has been supplied as a directory.
+
+### Running the pipeline
+
+Now that we have the sequencing reads (in FASTQ format), the databases (directories or `.tar.gz` archives), and a reference genome (FASTA, optionally gzipped), we can run the pipeline. The following command will perform short-read quality control, remove contaminant reads, merge the multiple libraries of each sample, run the three profilers, and finally generate standardised profiles.
+
+```bash
+nextflow run nf-core/taxprofiler -r 1.1.0 -profile docker \
+--input samplesheet.csv --databases database.csv --outdir ./results \
+--perform_shortread_qc \
+--perform_shortread_hostremoval --hostremoval_reference GCF_000819615.1_ViralProj14015_genomic.fna.gz \
+--perform_runmerging --save_runmerged_reads \
+--run_centrifuge --run_kaiju --run_kraken2 \
+--run_profile_standardisation \
+--max_cpus 2 --max_memory '6.GB'
+```
+
+:::info
+With all Docker containers pre-downloaded, this run took 2 minutes and 31 seconds on a laptop running Ubuntu 22.04.2 with 32 GB RAM and 16 CPUs.
+If you are running nf-core/taxprofiler for the first time, expect this command to take longer as Nextflow will have to download each software container for each step of the pipeline.
+:::
+
+To break down each line of the command:
+
+- Tell Nextflow to run nf-core/taxprofiler with the particular version and using the Docker container system
+- Specify the input and outputs, i.e., paths to the `samplesheet.csv`, `database.csv`, and directory where to save the results
+- Turn on basic quality control of input reads: adapter clipping, length filtering, etc.
+- Turn on the removal of host or contaminant reads, and specify the path to reference genome of this
+- Turn on run merging, i.e., combine the processed reads of each sample's multiple libraries, and save the merged reads (e.g. for downstream use)
+- Turn on the different taxonomic profiling tools you wish to use
+- Turn on profile standardisation and multi-sample taxon tables
+- (Optional) provide a _cap_ to the maximum amount of resources each step/job of the pipeline can use
+
+:::warning
+The `--max_cpus`, `--max_memory`, `--max_time` parameters _do not_ increase the amount of resources a step of the pipeline uses!
+They simply prevent Nextflow from requesting more than these thresholds, e.g. more than is available on your machine.
+To learn how to increase the computational resources given to the pipeline, see the central [nf-core documentation](https://nf-co.re/docs/usage/configuration).
+:::
+
+The pipeline run can be represented (in a simplified format!) as follows:
+
+```mermaid
+graph LR
+0([FASTQs]) --> X[FastQC] --> A[FastP] --> Y[FastQC] --> B[BowTie2]
+0([FASTQs]) --> X[FastQC] --> A[FastP] --> Y[FastQC] --> B[BowTie2]
+
+3([Reference FASTA]) -----> B
+
+2([Databases]) -------> D[Kraken2]
+2([Databases]) -------> E[Centrifuge]
+2([Databases]) -------> E[Centrifuge]
+2([Databases]) -------> F[Kaiju]
+
+B--> C[Run Merging]
+B--> C[Run Merging]
+C --> D[Kraken2]
+C --> E[Centrifuge]
+C --> F[Kaiju]
+
+D --> G[combinekreports]
+E --> H[combinekreports]
+F --> I[Kaiju2Table]
+
+D ---> J[TAXPASTA]
+E ---> J[TAXPASTA]
+F ---> J[TAXPASTA]
+
+X ---> K[MultiQC]
+A ---> K[MultiQC]
+B ---> K[MultiQC]
+D ---> K[MultiQC]
+E ---> K[MultiQC]
+F ---> K[MultiQC]
+```
+
+:::tip{title=""}
+We hope you see the benefit of using pipelines for such a task!
+:::
+
+### Output
+
+In the resulting directory `results/` you will find a range of directories.
+
+```tree
+results/
+├── bowtie2
+├── centrifuge
+├── fastp
+├── fastqc
+├── kaiju
+├── kraken2
+├── multiqc
+├── pipeline_info
+├── run_merging
+├── samtools
+└── taxpasta
+```
+
+To follow the same order as the command construction above:
+
+- Pipeline run report is found in `multiqc/` and resource statistics in `pipeline_info/`
+- Short-read QC results are found in `fastqc/` and `fastp/`
+- Host/contaminant removal results are found in `bowtie2/` and `samtools/`
+- Lane merged preprocessed reads are found in `run_merging/`
+- Raw profiling results are found in `kraken2/`, `centrifuge/`, and `kaiju/`
+- Standardised profiles for all profiling tools and databases are found in `taxpasta/`
+
+:::info
+Within each classifier results directory, there will be one directory and 'combined samples table' per database.
+:::
+
+:::info
+For read-preprocessing steps, only log files are stored in the `results/` directories by default. Refer to the parameters tab of the [nf-core/taxprofiler documentation](https://nf-co.re/taxprofiler/) for more options.
+:::
+
+The general 'workflow' of going through the results typically involves first reviewing the `multiqc/multiqc_report.html` file to get general statistics of the entire run, particularly of the preprocessing.
+You would then use the taxon tables in the `taxpasta/` directory for downstream analysis, referring to the classifier-specific results directories when you require more detailed information on each classification.
+
+Detailed descriptions of all results files can be found in the output tab of the [nf-core/taxprofiler documentation](https://nf-co.re/taxprofiler/).
+
+### Clean up
+
+Once you have completed the tutorial, you can run the following command to delete all downloaded and output files.
+
+```bash
+rm -r taxprofiler-tutorial/
+```
+
+:::warning
+Don't forget to change out of the directory above before trying to delete it!
+:::
+
+## Retrieving databases or building custom databases
+
+Not all taxonomic profilers provide ready-made or default databases. Here we will give brief guidance on how to build custom databases for each supported taxonomic profiler.
+
+You should always consult the documentation of each tool for more information, as here we only provide short minimal tutorials as quick reference guides (with no guarantee they are up to date).
+
+The following tutorials assume you already have the tool available (e.g. installed locally, or via Conda, Docker, etc.), and that you have already downloaded the FASTA files you wish to build into a database.
+
+### Bracken custom database
+
+Bracken does not require an independent database but rather builds upon Kraken2 databases. [The pre-built Kraken2 databases hosted by Ben Langmead](https://benlangmead.github.io/aws-indexes/k2) already contain the required files to run Bracken.
+
+However, to build custom databases, you will need a Kraken2 database, the (average) read length (in bp) of your sequencing experiment, the k-mer size used to build the Kraken2 database, and Kraken2 available on your machine.
+
+```bash
+bracken-build -d <KRAKEN_DB_DIR> -k <KMER_LEN> -l <READ_LEN>
+```
+
+:::tip
+You can speed up database construction by supplying the threads parameter (`-t`).
+:::
+
+:::tip
+If you do not have Kraken2 in your `$PATH` you can point to the binary with `-x /<path>/<to>/kraken2`.
+:::
+
+<details>
+<summary>Expected files in database directory</summary>
+
+- `bracken`
+ - `hash.k2d`
+ - `opts.k2d`
+ - `taxo.k2d`
+ - `database100mers.kmer_distrib`
+ - `database150mers.kmer_distrib`
+
+</details>
+
+You can follow the Bracken [tutorial](https://ccb.jhu.edu/software/bracken/index.shtml?t=manual) for more information.
+
+### Centrifuge custom database
+
+To build a custom Centrifuge database, you need to download the NCBI taxonomy files, make a custom `seqid2taxid.map`, and combine the FASTA files together.
+
+In total, you need four components: a tab-separated file mapping sequence IDs to taxonomy IDs (`--conversion-table`), a tab-separated file mapping taxonomy IDs to their parents and rank, up to the root of the tree (`--taxonomy-tree`), a pipe-separated file mapping taxonomy IDs to a name (`--name-table`), and the reference sequences.
+
+An example of custom `seqid2taxid.map`:
+
+```csv title="seqid2taxid.map"
+ NC_001133.9 4392
+ NC_012920.1 9606
+ NC_001134.8 4392
+ NC_001135.5 4392
+```
+
+```bash
+centrifuge-download -o taxonomy taxonomy
+cat *.{fa,fna} > input-sequences.fna
+centrifuge-build -p 4 --conversion-table seqid2taxid.map --taxonomy-tree taxonomy/nodes.dmp --name-table taxonomy/names.dmp input-sequences.fna taxprofiler_cf
+```
+
+<details>
+<summary>Expected files in database directory</summary>
+
+- `centrifuge`
+  - `<database_name>.1.cf`
+  - `<database_name>.2.cf`
+  - `<database_name>.3.cf`
+  - `<database_name>.4.cf`
+
+</details>
+
+For the Centrifuge custom database documentation, see [here](https://ccb.jhu.edu/software/centrifuge/manual.shtml#custom-database).
+
+### DIAMOND custom database
+
+To create a custom database for DIAMOND, the user should download and unzip the NCBI taxonomy files, and have the input FASTA files ready.
+
+The download and build steps are as follows:
+
+```bash
+wget ftp://ftp.ncbi.nlm.nih.gov/pub/taxonomy/taxdmp.zip
+unzip taxdmp.zip
+
+## warning: large file!
+wget ftp://ftp.ncbi.nlm.nih.gov/pub/taxonomy/accession2taxid/prot.accession2taxid.FULL.gz
+
+## warning: takes a long time!
+cat ../raw/*.faa | diamond makedb -d testdb-diamond --taxonmap prot.accession2taxid.FULL.gz --taxonnodes nodes.dmp --taxonnames names.dmp
+
+## clean up
+rm *dmp *txt *gz *prt *zip
+```
+
+<details>
+<summary>Expected files in database directory</summary>
+
+- `diamond`
+  - `<database_name>.dmnd`
+
+</details>
+
+A detailed description can be found [here](https://github.com/bbuchfink/diamond/wiki/1.-Tutorial).
+
+### Kaiju custom database
+
+A number of Kaiju pre-built indexes for reference datasets are maintained by the developers of Kaiju and made available on the [Kaiju website](https://bioinformatics-centre.github.io/kaiju/downloads.html). These databases can be used directly to run the workflow with Kaiju.
+
+In case the databases above do not contain your desired libraries, you can build a custom Kaiju database. To do so, you need three components: a FASTA file with the protein sequences, the NCBI taxonomy dump files, and the set of uppercase characters of the standard 20 amino acids you wish to include.
+
+:::warning
+The headers of the protein FASTA file must be the numeric NCBI taxon identifiers of the protein sequences.
+:::
+
+To download the NCBI taxonomy files, please run the following commands:
+
+```bash
+wget https://ftp.ncbi.nlm.nih.gov/pub/taxonomy/new_taxdump/new_taxdump.zip
+unzip new_taxdump.zip
+```
+
+To build the database, run the following command (the contents of taxdump must be in the same location where you run the command):
+
+```bash
+kaiju-mkbwt -a ACDEFGHIKLMNPQRSTVWY -o proteins proteins.faa
+kaiju-mkfmi proteins
+```
+
+:::tip
+You can speed up database construction by supplying the threads parameter (`-t`).
+:::
+
+<details>
+<summary>Expected files in database directory</summary>
+
+- `kaiju`
+ - `kaiju_db_*.fmi`
+ - `nodes.dmp`
+ - `names.dmp`
+
+</details>
+
+For the Kaiju database construction documentation, see [here](https://github.com/bioinformatics-centre/kaiju#custom-database).
+
+### Kraken2 custom database
+
+A number of database indexes have already been generated and are maintained by the [@BenLangmead Lab](https://github.com/BenLangmead), see [here](https://benlangmead.github.io/aws-indexes/k2). These databases can be used directly to run the workflow with Kraken2 as well as Bracken.
+
+In case the databases above do not contain your desired libraries, you can build a custom Kraken2 database. This requires two components: a taxonomy (consisting of `names.dmp`, `nodes.dmp`, and `*accession2taxid` files), and the FASTA files you wish to include.
+
+To pull the NCBI taxonomy, you can run the following:
+
+```bash
+kraken2-build --download-taxonomy --db <YOUR_DB_NAME>
+```
+
+You can then add your FASTA files with the following build command.
+
+```bash
+kraken2-build --add-to-library *.fna --db <YOUR_DB_NAME>
+```
+
+You can repeat this step multiple times to iteratively add more genomes prior to building.
+
+Once all genomes are added to the library, you can build the database (and optionally clean it up):
+
+```bash
+kraken2-build --build --db <YOUR_DB_NAME>
+kraken2-build --clean --db <YOUR_DB_NAME>
+```
+
+You can then add the `<YOUR_DB_NAME>/` path to your nf-core/taxprofiler database input sheet.
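+
+For example (a sketch; `mydb` is a placeholder database name):
+
+```csv title="database.csv"
+tool,db_name,db_params,db_path
+kraken2,mydb,,<YOUR_DB_NAME>/
+```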
+
+<details>
+<summary>Expected files in database directory</summary>
+
+- `kraken2`
+ - `opts.k2d`
+ - `hash.k2d`
+ - `taxo.k2d`
+
+</details>
+
+You can follow the Kraken2 [tutorial](https://github.com/DerrickWood/kraken2/blob/master/docs/MANUAL.markdown#custom-databases) for a more detailed description.
+
+### KrakenUniq custom database
+
+For any KrakenUniq database, you require: taxonomy files, the FASTA files you wish to include, a `seqid2taxid.map` file, and a k-mer length.
+
+First you must make a `seqid2taxid.map` file, which is a two-column text file containing the FASTA sequence header and the NCBI taxonomy ID for each sequence:
+
+```
+MT192765.1 2697049
+```
+
+Then make a directory (`<DB_DIR_NAME>/`) containing the `seqid2taxid.map` file, and your FASTA files in a subdirectory called `library/` (these FASTA files can be symlinked). You must then run the `taxonomy` command on the `<DB_DIR_NAME>/` directory, and then build it.
+
+```bash
+mkdir -p <DB_DIR_NAME>/library
+mv seqid2taxid.map <DB_DIR_NAME>/
+mv *.fna <DB_DIR_NAME>/library
+krakenuniq-download --db <DB_DIR_NAME> taxonomy
+krakenuniq-build --db <DB_DIR_NAME> --kmer-len 31
+```
+
+:::tip
+You can speed up database construction by supplying the threads parameter (`--threads`) to `krakenuniq-build`.
+:::
+
+<details>
+<summary>Expected files in database directory</summary>
+
+- `krakenuniq`
+ - `opts.k2d`
+ - `hash.k2d`
+ - `taxo.k2d`
+ - `database.idx`
+ - `taxDB`
+
+</details>
+
+Please see the [KrakenUniq documentation](https://github.com/fbreitwieser/krakenuniq#database-building) for more information.
+
+### MALT custom database
+
+To build a MALT database, you need the FASTA files to include, and an (unzipped) [MEGAN mapping 'db' file](https://software-ab.informatik.uni-tuebingen.de/download/megan6/) for your FASTA type. In addition to the input directory, output directory, and the mapping file, you also need to specify the sequence type (DNA or Protein) with the `-s` flag.
+
+```bash
+malt-build -i <path>/<to>/<fasta>/*.{fna,fa,fasta} -a2t <path>/<to>/<mapping_file>