diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100644
index 00000000..ea27a584
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,27 @@
+{
+ "name": "nfcore",
+ "image": "nfcore/gitpod:latest",
+ "remoteUser": "gitpod",
+
+ // Configure tool-specific properties.
+ "customizations": {
+ // Configure properties specific to VS Code.
+ "vscode": {
+ // Set *default* container specific settings.json values on container create.
+ "settings": {
+ "python.defaultInterpreterPath": "/opt/conda/bin/python",
+ "python.linting.enabled": true,
+ "python.linting.pylintEnabled": true,
+ "python.formatting.autopep8Path": "/opt/conda/bin/autopep8",
+ "python.formatting.yapfPath": "/opt/conda/bin/yapf",
+ "python.linting.flake8Path": "/opt/conda/bin/flake8",
+ "python.linting.pycodestylePath": "/opt/conda/bin/pycodestyle",
+ "python.linting.pydocstylePath": "/opt/conda/bin/pydocstyle",
+ "python.linting.pylintPath": "/opt/conda/bin/pylint"
+ },
+
+ // Add the IDs of extensions you want installed when the container is created.
+ "extensions": ["ms-python.python", "ms-python.vscode-pylance", "nf-core.nf-core-extensionpack"]
+ }
+ }
+}
diff --git a/.gitattributes b/.gitattributes
index 050bb120..7a2dabc2 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,3 +1,4 @@
*.config linguist-language=nextflow
+*.nf.test linguist-language=nextflow
modules/nf-core/** linguist-generated
subworkflows/nf-core/** linguist-generated
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index e93e473a..5ab7fd29 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -101,3 +101,19 @@ If you are using a new feature from core Nextflow, you may bump the minimum requ
### Images and figures
For overview images and other documents we follow the nf-core [style guidelines and examples](https://nf-co.re/developers/design_guidelines).
+
+## GitHub Codespaces
+
+This repo includes a devcontainer configuration which will create a GitHub Codespaces environment for Nextflow development! This is an online developer environment that runs in your browser, complete with VSCode and a terminal.
+
+To get started:
+
+- Open the repo in [Codespaces](https://github.com/nf-core/mhcquant/codespaces)
+- Tools installed
+ - nf-core
+ - Nextflow
+
+Devcontainer specs:
+
+- [DevContainer config](.devcontainer/devcontainer.json)
+- [Dockerfile](.devcontainer/Dockerfile)
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index 93fe63de..9aed7942 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -42,7 +42,7 @@ body:
attributes:
label: System information
description: |
- * Nextflow version _(eg. 21.10.3)_
+ * Nextflow version _(eg. 22.10.1)_
* Hardware _(eg. HPC, Desktop, Cloud)_
* Executor _(eg. slurm, local, awsbatch)_
* Container engine: _(e.g. Docker, Singularity, Conda, Podman, Shifter or Charliecloud)_
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 80129b57..b8ecffb1 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -11,6 +11,10 @@ on:
env:
NXF_ANSI_LOG: false
+concurrency:
+ group: "${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}"
+ cancel-in-progress: true
+
jobs:
test:
name: Run pipeline with test data
@@ -20,11 +24,11 @@ jobs:
strategy:
matrix:
NXF_VER:
- - "21.10.3"
+ - "22.10.1"
- "latest-everything"
steps:
- name: Check out pipeline code
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Install Nextflow
uses: nf-core/setup-nextflow@v1
@@ -33,7 +37,7 @@ jobs:
- name: Run pipeline with test data
run: |
- nextflow run ${GITHUB_WORKSPACE} -profile test,docker --outdir ./results
+ nextflow run ${GITHUB_WORKSPACE} -profile test,docker --spectrum_batch_size 5000 --outdir ./results
test_additional_params:
name: Run pipeline with additional params
@@ -45,7 +49,7 @@ jobs:
# Nextflow versions
include:
# Test pipeline minimum Nextflow version
- - NXF_VER: "21.10.3"
+ - NXF_VER: "22.10.1"
NXF_EDGE: ""
# Test latest edge release of Nextflow
- NXF_VER: ""
@@ -66,6 +70,4 @@ jobs:
- name: Run pipeline with additional params
run: |
- nextflow run ${GITHUB_WORKSPACE} -profile test,docker --predict_class_1 --predict_class_2 --predict_RT --outdir ./results
-
-#
+ nextflow run ${GITHUB_WORKSPACE} -profile test,docker --predict_class_1 --predict_class_2 --predict_RT --spectrum_batch_size 2000 --outdir ./results
diff --git a/.github/workflows/fix-linting.yml b/.github/workflows/fix-linting.yml
index 629e8806..1b93fd7b 100644
--- a/.github/workflows/fix-linting.yml
+++ b/.github/workflows/fix-linting.yml
@@ -24,7 +24,7 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.nf_core_bot_auth_token }}
- - uses: actions/setup-node@v2
+ - uses: actions/setup-node@v3
- name: Install Prettier
run: npm install -g prettier @prettier/plugin-php
@@ -34,9 +34,9 @@ jobs:
id: prettier_status
run: |
if prettier --check ${GITHUB_WORKSPACE}; then
- echo "::set-output name=result::pass"
+ echo "result=pass" >> $GITHUB_OUTPUT
else
- echo "::set-output name=result::fail"
+ echo "result=fail" >> $GITHUB_OUTPUT
fi
- name: Run 'prettier --write'
diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml
index 8a5ce69b..858d622e 100644
--- a/.github/workflows/linting.yml
+++ b/.github/workflows/linting.yml
@@ -4,6 +4,8 @@ name: nf-core linting
# that the code meets the nf-core guidelines.
on:
push:
+ branches:
+ - dev
pull_request:
release:
types: [published]
@@ -12,9 +14,9 @@ jobs:
EditorConfig:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- - uses: actions/setup-node@v2
+ - uses: actions/setup-node@v3
- name: Install editorconfig-checker
run: npm install -g editorconfig-checker
@@ -25,9 +27,9 @@ jobs:
Prettier:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- - uses: actions/setup-node@v2
+ - uses: actions/setup-node@v3
- name: Install Prettier
run: npm install -g prettier
@@ -38,7 +40,7 @@ jobs:
PythonBlack:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Check code lints with Black
uses: psf/black@stable
@@ -69,12 +71,12 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out pipeline code
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Install Nextflow
uses: nf-core/setup-nextflow@v1
- - uses: actions/setup-python@v3
+ - uses: actions/setup-python@v4
with:
python-version: "3.7"
architecture: "x64"
@@ -97,7 +99,7 @@ jobs:
- name: Upload linting log file artifact
if: ${{ always() }}
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v3
with:
name: linting-logs
path: |
diff --git a/.github/workflows/linting_comment.yml b/.github/workflows/linting_comment.yml
index 04758f61..0bbcd30f 100644
--- a/.github/workflows/linting_comment.yml
+++ b/.github/workflows/linting_comment.yml
@@ -18,7 +18,7 @@ jobs:
- name: Get PR number
id: pr_number
- run: echo "::set-output name=pr_number::$(cat linting-logs/PR_number.txt)"
+ run: echo "pr_number=$(cat linting-logs/PR_number.txt)" >> $GITHUB_OUTPUT
- name: Post PR comment
uses: marocchino/sticky-pull-request-comment@v2
diff --git a/.prettierignore b/.prettierignore
index eb74a574..437d763d 100644
--- a/.prettierignore
+++ b/.prettierignore
@@ -1,5 +1,6 @@
email_template.html
adaptivecard.json
+slackreport.json
.nextflow*
work/
data/
@@ -8,3 +9,4 @@ results/
testing/
testing*
*.pyc
+bin/
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 820c9305..15d3e7cd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,23 @@
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## v2.4.1 nfcore/mhcquant "Maroon Gold Boxer" (patch) - 2023/04/04
+
+### `Added`
+
+- Added low resolution settings (e.g. Iontrap) [#254](https://github.com/nf-core/mhcquant/pull/254)
+
+### `Fixed`
+
+- Improved Comet search performance by changing the default `spectrum_batch_size` from 500 to 0
+- [#249](https://github.com/nf-core/mhcquant/pull/249) - nf-core template update (version 2.7.2)
+- [#258](https://github.com/nf-core/mhcquant/pull/258) - Adjusted decoy strategy to reverse [#255](https://github.com/nf-core/mhcquant/issues/255) and made consistent fdr-level flags [#228](https://github.com/nf-core/mhcquant/issues/228)
+- [#845](https://github.com/nf-core/test-datasets/pull/845) - Adjusted nf-core test data set [#233](https://github.com/nf-core/mhcquant/issues/233)
+
+### `Dependencies`
+
+### `Deprecated`
+
## v2.4.0 nfcore/mhcquant "Maroon Gold Boxer" - 2022/12/02
Initial release of nf-core/mhcquant, created with the [nf-core](https://nf-co.re/) template.
diff --git a/README.md b/README.md
index 01794454..9e7efe92 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/mhcquant/results)[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.1569909-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.1569909)
-[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A521.10.3-23aa62.svg)](https://www.nextflow.io/)
+[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.10.1-23aa62.svg)](https://www.nextflow.io/)
[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)
[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)
[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)
@@ -26,7 +26,7 @@ On release, automated continuous integration tests run the pipeline on a full-si
## Quick Start
-1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.10.3`)
+1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=22.10.1`)
2. Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) (you can follow [this tutorial](https://singularity-tutorial.github.io/01-installation/)), [`Podman`](https://podman.io/), [`Shifter`](https://nersc.gitlab.io/development/shifter/how-to-use/) or [`Charliecloud`](https://hpc.github.io/charliecloud/) for full pipeline reproducibility _(you can use [`Conda`](https://conda.io/miniconda.html) both to install Nextflow itself and also to manage software within pipelines. Please only use it within pipelines as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_.
diff --git a/assets/methods_description_template.yml b/assets/methods_description_template.yml
index eb886895..ec4f5008 100644
--- a/assets/methods_description_template.yml
+++ b/assets/methods_description_template.yml
@@ -3,7 +3,6 @@ description: "Suggested text and references to use when describing pipeline usag
section_name: "nf-core/mhcquant Methods Description"
section_href: "https://github.com/nf-core/mhcquant"
plot_type: "html"
-## TODO nf-core: Update the HTML below to your prefered methods description, e.g. add publication citation for this pipeline
## You inject any metadata in the Nextflow '${workflow}' object
data: |
Methods
diff --git a/assets/slackreport.json b/assets/slackreport.json
new file mode 100644
index 00000000..043d02f2
--- /dev/null
+++ b/assets/slackreport.json
@@ -0,0 +1,34 @@
+{
+ "attachments": [
+ {
+ "fallback": "Plain-text summary of the attachment.",
+ "color": "<% if (success) { %>good<% } else { %>danger<%} %>",
+ "author_name": "sanger-tol/readmapping v${version} - ${runName}",
+ "author_icon": "https://www.nextflow.io/docs/latest/_static/favicon.ico",
+ "text": "<% if (success) { %>Pipeline completed successfully!<% } else { %>Pipeline completed with errors<% } %>",
+ "fields": [
+ {
+ "title": "Command used to launch the workflow",
+ "value": "```${commandLine}```",
+ "short": false
+ }
+ <%
+ if (!success) { %>
+ ,
+ {
+ "title": "Full error message",
+ "value": "```${errorReport}```",
+ "short": false
+ },
+ {
+ "title": "Pipeline configuration",
+ "value": "<% out << summary.collect{ k,v -> k == "hook_url" ? "_${k}_: (_hidden_)" : ( ( v.class.toString().contains('Path') || ( v.class.toString().contains('String') && v.contains('/') ) ) ? "_${k}_: `${v}`" : (v.class.toString().contains('DateTime') ? ("_${k}_: " + v.format(java.time.format.DateTimeFormatter.ofLocalizedDateTime(java.time.format.FormatStyle.MEDIUM))) : "_${k}_: ${v}") ) }.join(",\n") %>",
+ "short": false
+ }
+ <% }
+ %>
+ ],
+ "footer": "Completed at <% out << dateComplete.format(java.time.format.DateTimeFormatter.ofLocalizedDateTime(java.time.format.FormatStyle.MEDIUM)) %> (duration: ${duration})"
+ }
+ ]
+}
diff --git a/bin/check_samplesheet.py b/bin/check_samplesheet.py
index 3435eefc..9146885b 100755
--- a/bin/check_samplesheet.py
+++ b/bin/check_samplesheet.py
@@ -1,5 +1,7 @@
#!/usr/bin/env python
+# Written by Marissa Dubbelaar and released under MIT license.
+
"""Provide a command line tool to validate and transform tabular samplesheets."""
import os
@@ -147,7 +149,6 @@ def sniff_format(handle):
def check_samplesheet(file_in, file_out):
-
"""
Check that the tabular samplesheet has the structure expected by nf-core pipelines.
Validate the general shape of the table, expected columns, and each row. Also add
diff --git a/bin/get_ion_annotations.py b/bin/get_ion_annotations.py
index 0449fd25..35bffff0 100755
--- a/bin/get_ion_annotations.py
+++ b/bin/get_ion_annotations.py
@@ -1,5 +1,6 @@
#!/usr/bin/env python
-__author__ = "Jonas Scheid"
+
+# Written by Jonas Scheid and released under MIT license.
from typing import Tuple
from pyopenms import *
diff --git a/bin/markdown_to_html.py b/bin/markdown_to_html.py
index 1187b272..06bc6b10 100755
--- a/bin/markdown_to_html.py
+++ b/bin/markdown_to_html.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python
+
from __future__ import print_function
import argparse
import markdown
diff --git a/bin/mhcflurry_neoepitope_binding_prediction.py b/bin/mhcflurry_neoepitope_binding_prediction.py
index 27bf7dd8..31dd403a 100755
--- a/bin/mhcflurry_neoepitope_binding_prediction.py
+++ b/bin/mhcflurry_neoepitope_binding_prediction.py
@@ -1,4 +1,7 @@
#!/usr/bin/env python
+
+# Written by Lukas Heumos and released under MIT license.
+
import pandas as pd
import numpy as np
import logging
diff --git a/bin/mhcflurry_predict_mztab.py b/bin/mhcflurry_predict_mztab.py
index 57cc5a6b..a2b165b8 100755
--- a/bin/mhcflurry_predict_mztab.py
+++ b/bin/mhcflurry_predict_mztab.py
@@ -1,4 +1,7 @@
#!/usr/bin/env python
+
+# Written by Lukas Heumos and released under MIT license.
+
import argparse
import logging
import pandas as pd
diff --git a/bin/mhcflurry_predict_mztab_for_filtering.py b/bin/mhcflurry_predict_mztab_for_filtering.py
index fc4d3810..f5bd9e51 100755
--- a/bin/mhcflurry_predict_mztab_for_filtering.py
+++ b/bin/mhcflurry_predict_mztab_for_filtering.py
@@ -1,4 +1,7 @@
#!/usr/bin/env python
+
+# Written by Lukas Heumos and released under MIT license.
+
from mhcflurry import Class1AffinityPredictor
import logging
import sys
diff --git a/bin/mhcnuggets_predict_peptides.py b/bin/mhcnuggets_predict_peptides.py
index ab212e5a..d4ae30ee 100755
--- a/bin/mhcnuggets_predict_peptides.py
+++ b/bin/mhcnuggets_predict_peptides.py
@@ -1,4 +1,7 @@
#!/usr/bin/env python
+
+# Written by Lukas Heumos and released under MIT license.
+
from mhcnuggets.src.predict import predict
import argparse
import logging
diff --git a/bin/postprocess_neoepitopes_mhcnuggets.py b/bin/postprocess_neoepitopes_mhcnuggets.py
index 1982e515..5e4e0acc 100755
--- a/bin/postprocess_neoepitopes_mhcnuggets.py
+++ b/bin/postprocess_neoepitopes_mhcnuggets.py
@@ -1,4 +1,7 @@
#!/usr/bin/env python
+
+# Written by Lukas Heumos and released under MIT license.
+
import argparse
import sys
import logging
diff --git a/bin/postprocess_peptides_mhcnuggets.py b/bin/postprocess_peptides_mhcnuggets.py
index 2ab6ee53..68154f9c 100755
--- a/bin/postprocess_peptides_mhcnuggets.py
+++ b/bin/postprocess_peptides_mhcnuggets.py
@@ -1,4 +1,7 @@
#!/usr/bin/env python
+
+# Written by Lukas Heumos and released under MIT license.
+
import argparse
import sys
import logging
diff --git a/bin/preprocess_neoepitopes_mhcnuggets.py b/bin/preprocess_neoepitopes_mhcnuggets.py
index d8229030..3c08b32a 100755
--- a/bin/preprocess_neoepitopes_mhcnuggets.py
+++ b/bin/preprocess_neoepitopes_mhcnuggets.py
@@ -1,4 +1,7 @@
#!/usr/bin/env python
+
+# Written by Lukas Heumos and released under MIT license.
+
import argparse
import logging
import sys
diff --git a/bin/preprocess_peptides_mhcnuggets.py b/bin/preprocess_peptides_mhcnuggets.py
index c8192a44..f67d1b34 100755
--- a/bin/preprocess_peptides_mhcnuggets.py
+++ b/bin/preprocess_peptides_mhcnuggets.py
@@ -1,5 +1,7 @@
#!/usr/bin/env python
+# Written by Lukas Heumos and released under MIT license.
+
import argparse
import logging
import sys
diff --git a/bin/resolve_neoepitopes.py b/bin/resolve_neoepitopes.py
index deae2957..5f30ab70 100755
--- a/bin/resolve_neoepitopes.py
+++ b/bin/resolve_neoepitopes.py
@@ -1,4 +1,7 @@
#!/usr/bin/env python
+
+# Written by Lukas Heumos and released under MIT license.
+
"""
Commandline tool for extracting unique neoepitopes from mztab files and the
FRED2 vcf_neoepitope_predictor.py script.
diff --git a/bin/variants2fasta.py b/bin/variants2fasta.py
index 8ec07f6c..b1b900da 100755
--- a/bin/variants2fasta.py
+++ b/bin/variants2fasta.py
@@ -1,4 +1,7 @@
#!/usr/bin/env python
+
+# Written by Leon Bichmann and released under MIT license.
+
import time
import sys
import argparse
@@ -72,7 +75,6 @@ def get_type(ref, alt):
with open(file, "r") as f:
for i, l in enumerate(f):
-
# skip comments
if l.startswith("#") or l.strip() == "":
continue
@@ -143,7 +145,6 @@ def get_type(ref, alt):
def main():
-
model = argparse.ArgumentParser(description="Neoepitope protein fasta generation from variant vcf")
model.add_argument("-v", "--vcf", type=str, default=None, help="Path to the vcf input file")
diff --git a/bin/vcf_neoepitope_predictor.py b/bin/vcf_neoepitope_predictor.py
index e92ba895..b4227d23 100755
--- a/bin/vcf_neoepitope_predictor.py
+++ b/bin/vcf_neoepitope_predictor.py
@@ -1,4 +1,7 @@
#!/usr/bin/env python
+
+# Written by Leon Bichmann and released under MIT license.
+
"""
Commandline tool for (neo)epitope prediction
usage: neoepitopeprediction.py [-h]
@@ -124,7 +127,6 @@ def get_type(ref, alt):
with open(file, "r") as f:
for i, l in enumerate(f):
-
# skip comments
if l.startswith("#") or l.strip() == "":
continue
diff --git a/bin/vcf_reader.py b/bin/vcf_reader.py
index d8bb095e..743ab03b 100755
--- a/bin/vcf_reader.py
+++ b/bin/vcf_reader.py
@@ -1,3 +1,7 @@
+#!/usr/bin/env python
+
+# Written by Christopher Mohr / Mathias Walzer and released under MIT license.
+
import os
import sys
import logging
diff --git a/conf/modules.config b/conf/modules.config
index 3392f719..a53e6553 100644
--- a/conf/modules.config
+++ b/conf/modules.config
@@ -22,8 +22,7 @@ process {
withName: 'SAMPLESHEET_CHECK' {
publishDir = [
path: {"${params.outdir}/pipeline_info"},
- mode: params.publish_dir_mode,
- saveAs: {filename -> filename.equals('versions.yml') ? null : filename}
+ mode: params.publish_dir_mode
]
}
@@ -74,6 +73,7 @@ process {
"-precursor_mass_tolerance ${params.precursor_mass_tolerance}",
"-fragment_mass_tolerance ${params.fragment_mass_tolerance}",
"-fragment_bin_offset ${params.fragment_bin_offset}",
+ "-instrument ${params.instrument}",
"-num_hits ${params.num_hits}",
"-digest_mass_range ${params.digest_mass_range}",
"-max_variable_mods_in_peptide ${params.number_mods}",
@@ -129,7 +129,7 @@ process {
"-enzyme no_enzyme",
"-subset_max_train ${params.subset_max_train}",
"-doc ${params.description_correct_features} ",
- (params.fdr_level != 'psm-level-fdrs') ? "-" + params.fdr_level : ""
+ (params.fdr_level != 'psm_level_fdrs') ? "-" + params.fdr_level : ""
].join(' ').trim()
publishDir = [
path: {"${params.outdir}/intermediate_results/percolator"},
@@ -228,7 +228,7 @@ process {
"-enzyme no_enzyme",
"-subset_max_train ${params.subset_max_train}",
"-doc ${params.description_correct_features} ",
- (params.fdr_level != 'psm-level-fdrs') ? "-" + params.fdr_level : ""
+ (params.fdr_level != 'psm_level_fdrs') ? "-" + params.fdr_level : ""
].join(' ').trim()
publishDir = [
path: {"${params.outdir}/intermediate_results/refined_fdr"},
diff --git a/conf/test.config b/conf/test.config
index d7050a13..b3f4b221 100644
--- a/conf/test.config
+++ b/conf/test.config
@@ -20,7 +20,7 @@ params {
max_time = '6.h'
// Input data
- fasta = 'https://raw.githubusercontent.com/nf-core/test-datasets/mhcquant/test.fasta'
- input = 'https://raw.githubusercontent.com/nf-core/test-datasets/mhcquant/sample_sheet.tsv'
- allele_sheet = 'https://raw.githubusercontent.com/nf-core/test-datasets/mhcquant/allele_sheet.tsv'
+ fasta = 'https://raw.githubusercontent.com/nf-core/test-datasets/mhcquant/testdata/UP000005640_9606.fasta'
+ input = 'https://raw.githubusercontent.com/nf-core/test-datasets/mhcquant/testdata/HepG2_sample_sheet.tsv'
+ allele_sheet = 'https://raw.githubusercontent.com/nf-core/test-datasets/mhcquant/testdata/HepG2_allele_sheet.tsv'
}
diff --git a/conf/test_full.config b/conf/test_full.config
index f7bdda4e..1ba617ee 100644
--- a/conf/test_full.config
+++ b/conf/test_full.config
@@ -17,7 +17,7 @@ params {
predict_class_1 = true
// Input data
- fasta = 'https://raw.githubusercontent.com/nf-core/test-datasets/mhcquant/test.fasta'
- input = 'https://raw.githubusercontent.com/nf-core/test-datasets/mhcquant/sample_sheet_full.tsv'
- allele_sheet = 'https://raw.githubusercontent.com/nf-core/test-datasets/mhcquant/allele_sheet_full.tsv'
+ fasta = 'https://raw.githubusercontent.com/nf-core/test-datasets/mhcquant/testdata/UP000005640_9606.fasta'
+ input = 'https://raw.githubusercontent.com/nf-core/test-datasets/mhcquant/testdata/sample_sheet_full.tsv'
+ allele_sheet = 'https://raw.githubusercontent.com/nf-core/test-datasets/mhcquant/testdata/allele_sheet_full.tsv'
}
diff --git a/docs/usage.md b/docs/usage.md
index a4e89179..e5b76cdd 100644
--- a/docs/usage.md
+++ b/docs/usage.md
@@ -62,7 +62,7 @@ An [example samplesheet](../assets/samplesheet.tsv) has been provided with the p
The typical command for running the pipeline is as follows:
```console
-nextflow run nf-core/mhcquant --input 'samples.tsv' --outdir --fasta 'SWISSPROT_2020.fasta' --allele_sheet 'alleles.tsv' --vcf_sheet 'variants.tsv' --include_proteins_from_vcf --predict_class_1 -profile docker
+nextflow run nf-core/mhcquant --input 'samples.tsv' --outdir <OUTDIR> --fasta 'SWISSPROT_2020.fasta' --allele_sheet 'alleles.tsv' --vcf_sheet 'variants.tsv' --include_proteins_from_vcf --predict_class_1 --spectrum_batch_size 500 -profile docker
```
This will launch the pipeline with the `docker` configuration profile. See below for more information about profiles.
@@ -88,9 +88,9 @@ nextflow pull nf-core/mhcquant
It is a good idea to specify a pipeline version when running the pipeline on your data. This ensures that a specific version of the pipeline code and software are used when you run your pipeline. If you keep using the same tag, you'll be running the same version of the pipeline, even if there have been changes to the code since.
-First, go to the [nf-core/mhcquant releases page](https://github.com/nf-core/mhcquant/releases) and find the latest version number - numeric only (eg. `1.3`). Then specify this when running the pipeline with `-r` (one hyphen) - eg. `-r 1.3`.
+First, go to the [nf-core/mhcquant releases page](https://github.com/nf-core/mhcquant/releases) and find the latest pipeline version - numeric only (eg. `1.3.1`). Then specify this when running the pipeline with `-r` (one hyphen) - eg. `-r 1.3.1`. Of course, you can switch to another version by changing the number after the `-r` flag.
-This version number will be logged in reports when you run the pipeline, so that you'll know what you used when you look back in the future.
+This version number will be logged in reports when you run the pipeline, so that you'll know what you used when you look back in the future. For example, at the bottom of the MultiQC reports.
## Core Nextflow arguments
@@ -100,7 +100,7 @@ This version number will be logged in reports when you run the pipeline, so that
Use this parameter to choose a configuration profile. Profiles can give configuration presets for different compute environments.
-Several generic profiles are bundled with the pipeline which instruct the pipeline to use software packaged using different methods (Docker, Singularity, Podman, Shifter, Charliecloud, Conda) - see below. When using Biocontainers, most of these software packaging methods pull Docker containers from quay.io e.g [FastQC](https://quay.io/repository/biocontainers/fastqc) except for Singularity which directly downloads Singularity images via https hosted by the [Galaxy project](https://depot.galaxyproject.org/singularity/) and Conda which downloads and installs software locally from [Bioconda](https://bioconda.github.io/).
+Several generic profiles are bundled with the pipeline which instruct the pipeline to use software packaged using different methods (Docker, Singularity, Podman, Shifter, Charliecloud, Conda) - see below.
> We highly recommend the use of Docker or Singularity containers for full pipeline reproducibility, however when this is not possible, Conda is also supported.
@@ -109,8 +109,11 @@ The pipeline also dynamically loads configurations from [https://github.com/nf-c
Note that multiple profiles can be loaded, for example: `-profile test,docker` - the order of arguments is important!
They are loaded in sequence, so later profiles can overwrite earlier profiles.
-If `-profile` is not specified, the pipeline will run locally and expect all software to be installed and available on the `PATH`. This is _not_ recommended.
+If `-profile` is not specified, the pipeline will run locally and expect all software to be installed and available on the `PATH`. This is _not_ recommended, since it can lead to different results on different machines dependent on the computer environment.
+- `test`
+ - A profile with a complete configuration for automated testing
+ - Includes links to test data so needs no other parameters
- `docker`
- A generic configuration profile to be used with [Docker](https://docker.com/)
- `singularity`
@@ -123,9 +126,6 @@ If `-profile` is not specified, the pipeline will run locally and expect all sof
- A generic configuration profile to be used with [Charliecloud](https://hpc.github.io/charliecloud/)
- `conda`
- A generic configuration profile to be used with [Conda](https://conda.io/docs/). Please only use Conda as a last resort i.e. when it's not possible to run the pipeline with Docker, Singularity, Podman, Shifter or Charliecloud.
-- `test`
- - A profile with a complete configuration for automated testing
- - Includes links to test data so needs no other parameters
### `-resume`
@@ -172,8 +172,14 @@ Work dir:
Tip: you can replicate the issue by changing to the process work dir and entering the command `bash .command.run`
```
+#### For beginners
+
+As a first step to bypass this error, you could try to increase the amount of CPUs, memory, and time for the whole pipeline by raising the parameters `--max_cpus`, `--max_memory`, and `--max_time`. Based on the error above, you have to increase the amount of memory. Therefore, you can go to the [parameter documentation of rnaseq](https://nf-co.re/rnaseq/3.9/parameters) and scroll down to the `show hidden parameter` button to get the default value for `--max_memory`, in this case 128GB. You can then try to run your pipeline again with `--max_memory 200GB -resume` to skip all processes that were already calculated. If you cannot increase the resources of the complete pipeline, you can try to adapt the resources for a single process as mentioned below.
+
+#### Advanced option on process level
+
To bypass this error you would need to find exactly which resources are set by the `STAR_ALIGN` process. The quickest way is to search for `process STAR_ALIGN` in the [nf-core/rnaseq Github repo](https://github.com/nf-core/rnaseq/search?q=process+STAR_ALIGN).
-We have standardised the structure of Nextflow DSL2 pipelines such that all module files will be present in the `modules/` directory and so, based on the search results, the file we want is `modules/nf-core/software/star/align/main.nf`.
+We have standardised the structure of Nextflow DSL2 pipelines such that all module files will be present in the `modules/` directory and so, based on the search results, the file we want is `modules/nf-core/star/align/main.nf`.
If you click on the link to that file you will notice that there is a `label` directive at the top of the module that is set to [`label process_high`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/modules/nf-core/software/star/align/main.nf#L9).
The [Nextflow `label`](https://www.nextflow.io/docs/latest/process.html#label) directive allows us to organise workflow processes in separate groups which can be referenced in a configuration file to select and configure subset of processes having similar computing requirements.
The default values for the `process_high` label are set in the pipeline's [`base.config`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/conf/base.config#L33-L37) which in this case is defined as 72GB.
@@ -192,7 +198,7 @@ process {
>
> If you get a warning suggesting that the process selector isn't recognised check that the process name has been specified correctly.
-### Updating containers
+### Updating containers (advanced users)
The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. If for some reason you need to use a different version of a particular tool with the pipeline then you just need to identify the `process` name and override the Nextflow `container` definition for that process using the `withName` declaration. For example, in the [nf-core/viralrecon](https://nf-co.re/viralrecon) pipeline a tool called [Pangolin](https://github.com/cov-lineages/pangolin) has been used during the COVID-19 pandemic to assign lineages to SARS-CoV-2 genome sequenced samples. Given that the lineage assignments change quite frequently it doesn't make sense to re-release nf-core/viralrecon every time a new version of Pangolin has been released. However, you can override the default container used by the pipeline by creating a custom config file and passing it as a command-line argument via `-c custom.config`.
diff --git a/lib/NfcoreSchema.groovy b/lib/NfcoreSchema.groovy
index b3d092f8..33cd4f6e 100755
--- a/lib/NfcoreSchema.groovy
+++ b/lib/NfcoreSchema.groovy
@@ -46,7 +46,6 @@ class NfcoreSchema {
'quiet',
'syslog',
'v',
- 'version',
// Options for `nextflow run` command
'ansi',
diff --git a/lib/NfcoreTemplate.groovy b/lib/NfcoreTemplate.groovy
index 27feb009..25a0a74a 100755
--- a/lib/NfcoreTemplate.groovy
+++ b/lib/NfcoreTemplate.groovy
@@ -32,6 +32,25 @@ class NfcoreTemplate {
}
}
+ //
+ // Generate version string
+ //
+ public static String version(workflow) {
+ String version_string = ""
+
+ if (workflow.manifest.version) {
+ def prefix_v = workflow.manifest.version[0] != 'v' ? 'v' : ''
+ version_string += "${prefix_v}${workflow.manifest.version}"
+ }
+
+ if (workflow.commitId) {
+ def git_shortsha = workflow.commitId.substring(0, 7)
+ version_string += "-g${git_shortsha}"
+ }
+
+ return version_string
+ }
+
//
// Construct and send completion email
//
@@ -61,7 +80,7 @@ class NfcoreTemplate {
misc_fields['Nextflow Compile Timestamp'] = workflow.nextflow.timestamp
def email_fields = [:]
- email_fields['version'] = workflow.manifest.version
+ email_fields['version'] = NfcoreTemplate.version(workflow)
email_fields['runName'] = workflow.runName
email_fields['success'] = workflow.success
email_fields['dateComplete'] = workflow.complete
@@ -146,10 +165,10 @@ class NfcoreTemplate {
}
//
- // Construct and send adaptive card
- // https://adaptivecards.io
+ // Construct and send a notification to a web server as JSON
+ // e.g. Microsoft Teams and Slack
//
- public static void adaptivecard(workflow, params, summary_params, projectDir, log) {
+ public static void IM_notification(workflow, params, summary_params, projectDir, log) {
def hook_url = params.hook_url
def summary = [:]
@@ -170,7 +189,7 @@ class NfcoreTemplate {
misc_fields['nxf_timestamp'] = workflow.nextflow.timestamp
def msg_fields = [:]
- msg_fields['version'] = workflow.manifest.version
+ msg_fields['version'] = NfcoreTemplate.version(workflow)
msg_fields['runName'] = workflow.runName
msg_fields['success'] = workflow.success
msg_fields['dateComplete'] = workflow.complete
@@ -178,13 +197,16 @@ class NfcoreTemplate {
msg_fields['exitStatus'] = workflow.exitStatus
msg_fields['errorMessage'] = (workflow.errorMessage ?: 'None')
msg_fields['errorReport'] = (workflow.errorReport ?: 'None')
- msg_fields['commandLine'] = workflow.commandLine
+ msg_fields['commandLine'] = workflow.commandLine.replaceFirst(/ +--hook_url +[^ ]+/, "")
msg_fields['projectDir'] = workflow.projectDir
msg_fields['summary'] = summary << misc_fields
// Render the JSON template
def engine = new groovy.text.GStringTemplateEngine()
- def hf = new File("$projectDir/assets/adaptivecard.json")
+ // Different JSON depending on the service provider
+ // Defaults to "Adaptive Cards" (https://adaptivecards.io), except Slack which has its own format
+ def json_path = hook_url.contains("hooks.slack.com") ? "slackreport.json" : "adaptivecard.json"
+ def hf = new File("$projectDir/assets/${json_path}")
def json_template = engine.createTemplate(hf).make(msg_fields)
def json_message = json_template.toString()
@@ -209,7 +231,7 @@ class NfcoreTemplate {
if (workflow.stats.ignoredCount == 0) {
log.info "-${colors.purple}[$workflow.manifest.name]${colors.green} Pipeline completed successfully${colors.reset}-"
} else {
- log.info "-${colors.purple}[$workflow.manifest.name]${colors.red} Pipeline completed successfully, but with errored process(es) ${colors.reset}-"
+ log.info "-${colors.purple}[$workflow.manifest.name]${colors.yellow} Pipeline completed successfully, but with errored process(es) ${colors.reset}-"
}
} else {
log.info "-${colors.purple}[$workflow.manifest.name]${colors.red} Pipeline completed with errors${colors.reset}-"
@@ -297,6 +319,7 @@ class NfcoreTemplate {
//
public static String logo(workflow, monochrome_logs) {
Map colors = logColours(monochrome_logs)
+ String workflow_version = NfcoreTemplate.version(workflow)
String.format(
"""\n
${dashedLine(monochrome_logs)}
@@ -305,7 +328,7 @@ class NfcoreTemplate {
${colors.blue} |\\ | |__ __ / ` / \\ |__) |__ ${colors.yellow}} {${colors.reset}
${colors.blue} | \\| | \\__, \\__/ | \\ |___ ${colors.green}\\`-._,-`-,${colors.reset}
${colors.green}`._,._,\'${colors.reset}
- ${colors.purple} ${workflow.manifest.name} v${workflow.manifest.version}${colors.reset}
+ ${colors.purple} ${workflow.manifest.name} ${workflow_version}${colors.reset}
${dashedLine(monochrome_logs)}
""".stripIndent()
)
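The new `version()` helper replaces direct reads of `workflow.manifest.version`, so the summary email, the IM notification and the console logo all report the same string, including a short git SHA when one is available. A minimal re-implementation for illustration (function name and inputs are hypothetical):

```groovy
// Hedged sketch of the version() logic above, illustration only
String describeVersion(String manifestVersion, String commitId) {
    String v = (manifestVersion[0] != 'v' ? 'v' : '') + manifestVersion
    return commitId ? "${v}-g${commitId.substring(0, 7)}" : v
}

assert describeVersion('2.4.1dev', 'c8e35eb2055c099720a75538d1b8adb3fb5a464c') == 'v2.4.1dev-gc8e35eb'
assert describeVersion('v2.4.1', null)                                         == 'v2.4.1'
```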
diff --git a/lib/WorkflowMain.groovy b/lib/WorkflowMain.groovy
index 9dba706b..f232749a 100644
--- a/lib/WorkflowMain.groovy
+++ b/lib/WorkflowMain.groovy
@@ -20,7 +20,7 @@ class WorkflowMain {
}
//
- // Print help to screen if required
+ // Generate help string
//
public static String help(workflow, params, log) {
def command = "nextflow run ${workflow.manifest.name} --input 'samples.tsv' --fasta 'SWISSPROT_2020.fasta' --allele_sheet 'alleles.tsv' --vcf_sheet 'variants.tsv' --include_proteins_from_vcf --predict_class_1 -profile docker"
@@ -33,7 +33,7 @@ class WorkflowMain {
}
//
- // Print parameter summary log to screen
+ // Generate parameter summary log string
//
public static String paramsSummaryLog(workflow, params, log) {
def summary_log = ''
@@ -54,20 +54,26 @@ class WorkflowMain {
System.exit(0)
}
- // Validate workflow parameters via the JSON schema
- if (params.validate_params) {
- NfcoreSchema.validateParameters(workflow, params, log)
+ // Print workflow version and exit on --version
+ if (params.version) {
+ String workflow_version = NfcoreTemplate.version(workflow)
+ log.info "${workflow.manifest.name} ${workflow_version}"
+ System.exit(0)
}
// Print parameter summary log to screen
-
log.info paramsSummaryLog(workflow, params, log)
+ // Validate workflow parameters via the JSON schema
+ if (params.validate_params) {
+ NfcoreSchema.validateParameters(workflow, params, log)
+ }
+
// Check that a -profile or Nextflow config has been provided to run the pipeline
NfcoreTemplate.checkConfigProvided(workflow, log)
// Check that conda channels are set-up correctly
- if (params.enable_conda) {
+ if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) {
Utils.checkCondaChannels(log)
}
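With `params.enable_conda` removed, the conda-channel check is now triggered by inspecting the active profile string directly. A short illustration of the new condition (profile strings are hypothetical):

```groovy
// How the new trigger condition evaluates for a few profile strings
assert 'test,conda'.tokenize(',').intersect(['conda', 'mamba']).size() >= 1  // check runs
assert 'test,mamba'.tokenize(',').intersect(['conda', 'mamba']).size() >= 1  // check runs
assert 'test,docker'.tokenize(',').intersect(['conda', 'mamba']).isEmpty()   // check skipped
```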
diff --git a/modules.json b/modules.json
index 0d87f6cf..aa86fecc 100644
--- a/modules.json
+++ b/modules.json
@@ -7,11 +7,13 @@
"nf-core": {
"custom/dumpsoftwareversions": {
"branch": "master",
- "git_sha": "5e34754d42cd2d5d248ca8673c0a53cdf5624905"
+ "git_sha": "c8e35eb2055c099720a75538d1b8adb3fb5a464c",
+ "installed_by": ["modules"]
},
"multiqc": {
"branch": "master",
- "git_sha": "5e34754d42cd2d5d248ca8673c0a53cdf5624905"
+ "git_sha": "c8e35eb2055c099720a75538d1b8adb3fb5a464c",
+ "installed_by": ["modules"]
}
}
}
diff --git a/modules/local/openms_decoydatabase.nf b/modules/local/openms_decoydatabase.nf
index f436a5c8..6811e71d 100644
--- a/modules/local/openms_decoydatabase.nf
+++ b/modules/local/openms_decoydatabase.nf
@@ -24,7 +24,8 @@ process OPENMS_DECOYDATABASE {
DecoyDatabase -in $fasta \\
-out ${prefix}.fasta \\
-decoy_string DECOY_ \\
- -decoy_string_position prefix
+ -decoy_string_position prefix \\
+ -enzyme 'no cleavage'
cat <<-END_VERSIONS > versions.yml
"${task.process}":
diff --git a/modules/local/samplesheet_check.nf b/modules/local/samplesheet_check.nf
index 48374ce2..aa75ed96 100644
--- a/modules/local/samplesheet_check.nf
+++ b/modules/local/samplesheet_check.nf
@@ -1,8 +1,8 @@
process SAMPLESHEET_CHECK {
tag "$samplesheet"
- label 'process_low'
+ label 'process_single'
- conda (params.enable_conda ? "conda-forge::python=3.8.3" : null)
+ conda "conda-forge::python=3.8.3"
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/python:3.8.3' :
'quay.io/biocontainers/python:3.8.3' }"
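The `process_low` to `process_single` relabel moves this lightweight helper task onto the template's single-core resource tier. As a sketch, assuming the conventional nf-core `base.config` pattern (the values below are the usual template defaults, not shown in this diff):

```groovy
// Hedged sketch of how a label maps to resources in a typical nf-core base.config
process {
    withLabel: process_single {
        cpus   = 1
        memory = 6.GB
        time   = 4.h
    }
}
```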
diff --git a/modules/nf-core/custom/dumpsoftwareversions/main.nf b/modules/nf-core/custom/dumpsoftwareversions/main.nf
index cebb6e05..3df21765 100644
--- a/modules/nf-core/custom/dumpsoftwareversions/main.nf
+++ b/modules/nf-core/custom/dumpsoftwareversions/main.nf
@@ -2,7 +2,7 @@ process CUSTOM_DUMPSOFTWAREVERSIONS {
label 'process_single'
// Requires `pyyaml` which does not have a dedicated container but is in the MultiQC container
- conda (params.enable_conda ? 'bioconda::multiqc=1.13' : null)
+ conda "bioconda::multiqc=1.13"
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/multiqc:1.13--pyhdfd78af_0' :
'quay.io/biocontainers/multiqc:1.13--pyhdfd78af_0' }"
diff --git a/modules/nf-core/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py b/modules/nf-core/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py
old mode 100644
new mode 100755
index 787bdb7b..e55b8d43
--- a/modules/nf-core/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py
+++ b/modules/nf-core/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py
@@ -1,5 +1,9 @@
#!/usr/bin/env python
+
+"""Provide functions to merge multiple versions.yml files."""
+
+
import platform
from textwrap import dedent
@@ -7,6 +11,7 @@
def _make_versions_html(versions):
+ """Generate a tabular HTML output of all versions for MultiQC."""
html = [
dedent(
"""\\
@@ -45,47 +50,53 @@ def _make_versions_html(versions):
return "\\n".join(html)
-versions_this_module = {}
-versions_this_module["${task.process}"] = {
- "python": platform.python_version(),
- "yaml": yaml.__version__,
-}
-
-with open("$versions") as f:
- versions_by_process = yaml.load(f, Loader=yaml.BaseLoader) | versions_this_module
-
-# aggregate versions by the module name (derived from fully-qualified process name)
-versions_by_module = {}
-for process, process_versions in versions_by_process.items():
- module = process.split(":")[-1]
- try:
- if versions_by_module[module] != process_versions:
- raise AssertionError(
- "We assume that software versions are the same between all modules. "
- "If you see this error-message it means you discovered an edge-case "
- "and should open an issue in nf-core/tools. "
- )
- except KeyError:
- versions_by_module[module] = process_versions
-
-versions_by_module["Workflow"] = {
- "Nextflow": "$workflow.nextflow.version",
- "$workflow.manifest.name": "$workflow.manifest.version",
-}
-
-versions_mqc = {
- "id": "software_versions",
- "section_name": "${workflow.manifest.name} Software Versions",
- "section_href": "https://github.com/${workflow.manifest.name}",
- "plot_type": "html",
- "description": "are collected at run time from the software output.",
- "data": _make_versions_html(versions_by_module),
-}
-
-with open("software_versions.yml", "w") as f:
- yaml.dump(versions_by_module, f, default_flow_style=False)
-with open("software_versions_mqc.yml", "w") as f:
- yaml.dump(versions_mqc, f, default_flow_style=False)
-
-with open("versions.yml", "w") as f:
- yaml.dump(versions_this_module, f, default_flow_style=False)
+def main():
+ """Load all version files and generate merged output."""
+ versions_this_module = {}
+ versions_this_module["${task.process}"] = {
+ "python": platform.python_version(),
+ "yaml": yaml.__version__,
+ }
+
+ with open("$versions") as f:
+ versions_by_process = yaml.load(f, Loader=yaml.BaseLoader) | versions_this_module
+
+ # aggregate versions by the module name (derived from fully-qualified process name)
+ versions_by_module = {}
+ for process, process_versions in versions_by_process.items():
+ module = process.split(":")[-1]
+ try:
+ if versions_by_module[module] != process_versions:
+ raise AssertionError(
+ "We assume that software versions are the same between all modules. "
+ "If you see this error-message it means you discovered an edge-case "
+ "and should open an issue in nf-core/tools. "
+ )
+ except KeyError:
+ versions_by_module[module] = process_versions
+
+ versions_by_module["Workflow"] = {
+ "Nextflow": "$workflow.nextflow.version",
+ "$workflow.manifest.name": "$workflow.manifest.version",
+ }
+
+ versions_mqc = {
+ "id": "software_versions",
+ "section_name": "${workflow.manifest.name} Software Versions",
+ "section_href": "https://github.com/${workflow.manifest.name}",
+ "plot_type": "html",
+ "description": "are collected at run time from the software output.",
+ "data": _make_versions_html(versions_by_module),
+ }
+
+ with open("software_versions.yml", "w") as f:
+ yaml.dump(versions_by_module, f, default_flow_style=False)
+ with open("software_versions_mqc.yml", "w") as f:
+ yaml.dump(versions_mqc, f, default_flow_style=False)
+
+ with open("versions.yml", "w") as f:
+ yaml.dump(versions_this_module, f, default_flow_style=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/modules/nf-core/multiqc/main.nf b/modules/nf-core/multiqc/main.nf
index a8159a57..68f66bea 100644
--- a/modules/nf-core/multiqc/main.nf
+++ b/modules/nf-core/multiqc/main.nf
@@ -1,7 +1,7 @@
process MULTIQC {
label 'process_single'
- conda (params.enable_conda ? 'bioconda::multiqc=1.13' : null)
+ conda "bioconda::multiqc=1.13"
container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
'https://depot.galaxyproject.org/singularity/multiqc:1.13--pyhdfd78af_0' :
'quay.io/biocontainers/multiqc:1.13--pyhdfd78af_0' }"
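The same `conda` directive migration is applied across all modules above: instead of gating the directive on `params.enable_conda`, the directive now always declares the environment, and activation moves to the `conda.enabled` config scope set by the `conda`/`mamba` profiles (see `nextflow.config` below). Side by side:

```groovy
// Before: directive gated on a pipeline parameter
conda (params.enable_conda ? 'bioconda::multiqc=1.13' : null)

// After: directive always declared; the conda profile opts in via `conda.enabled = true`
conda "bioconda::multiqc=1.13"
```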
diff --git a/nextflow.config b/nextflow.config
index 5ba91572..34011c95 100644
--- a/nextflow.config
+++ b/nextflow.config
@@ -33,8 +33,9 @@ params {
fdr_threshold = 0.01
fdr_level = 'peptide_level_fdrs'
fixed_mods = ' '
- fragment_bin_offset = 0
+ fragment_bin_offset = 0.0
fragment_mass_tolerance = 0.02
+ instrument = 'high_res'
klammer = false
max_rt_alignment_shift = 300
number_mods = 3
@@ -51,7 +52,7 @@ params {
remove_precursor_peak = false
run_centroidisation = false
skip_quantification = false
- spectrum_batch_size = 500
+ spectrum_batch_size = 0
subset_max_train = 0
use_x_ions = false
use_z_ions = false
@@ -71,18 +72,18 @@ params {
multiqc_methods_description = null
// Boilerplate options
- tracedir = "${params.outdir}/pipeline_info"
- publish_dir_mode = 'copy'
- email = null
- email_on_fail = null
- plaintext_email = false
- monochrome_logs = false
- hook_url = null
- help = false
- validate_params = true
- show_hidden_params = false
- schema_ignore_params = 'genomes'
- enable_conda = false
+ tracedir = "${params.outdir}/pipeline_info"
+ publish_dir_mode = 'copy'
+ email = null
+ email_on_fail = null
+ plaintext_email = false
+ monochrome_logs = false
+ hook_url = null
+ help = false
+ version = false
+ validate_params = true
+ show_hidden_params = false
+ schema_ignore_params = 'genomes'
// Config options
@@ -121,7 +122,7 @@ try {
profiles {
debug { process.beforeScript = 'echo $HOSTNAME' }
conda {
- params.enable_conda = true
+ conda.enabled = true
docker.enabled = false
singularity.enabled = false
podman.enabled = false
@@ -129,7 +130,7 @@ profiles {
charliecloud.enabled = false
}
mamba {
- params.enable_conda = true
+ conda.enabled = true
conda.useMamba = true
docker.enabled = false
singularity.enabled = false
@@ -145,6 +146,9 @@ profiles {
shifter.enabled = false
charliecloud.enabled = false
}
+ arm {
+ docker.runOptions = '-u $(id -u):$(id -g) --platform=linux/amd64'
+ }
singularity {
singularity.enabled = true
singularity.autoMounts = true
@@ -219,12 +223,12 @@ dag {
manifest {
name = 'nf-core/mhcquant'
- author = 'Leon Bichmann, Marissa Dubbelaar'
+ author = """Leon Bichmann, Marissa Dubbelaar"""
homePage = 'https://github.com/nf-core/mhcquant'
- description = 'Identify and quantify peptides from mass spectrometry raw data'
+ description = """Identify and quantify peptides from mass spectrometry raw data"""
mainScript = 'main.nf'
- nextflowVersion = '!>=21.10.3'
- version = '2.3.2dev'
+ nextflowVersion = '!>=22.10.1'
+ version = '2.4.1dev'
doi = '10.1021/acs.jproteome.9b00313'
}
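Two behavioural defaults change here: `spectrum_batch_size` drops from 500 to 0, meaning Comet loads all spectra in a single batch (faster, but more memory-hungry), and the new `instrument` parameter defaults to `high_res`. Either can be overridden per run with a custom config, as in this hedged sketch (values are examples, not recommendations):

```groovy
// custom.config, a hypothetical per-run override applied with `-c custom.config`
params {
    // Restore the previous batching behaviour on memory-constrained machines
    spectrum_batch_size = 500
    // Low-resolution (e.g. ion trap) fragment spectra
    instrument          = 'low_res'
}
```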
diff --git a/nextflow_schema.json b/nextflow_schema.json
index 2025acd1..ddbf4152 100644
--- a/nextflow_schema.json
+++ b/nextflow_schema.json
@@ -124,9 +124,9 @@
"help_text": " For High-Resolution instruments a precursor mass tolerance value of 5ppm is recommended. (eg. 5)"
},
"fragment_bin_offset": {
- "type": "integer",
+ "type": "number",
"fa_icon": "fas fa-indent",
- "default": 0,
+ "default": 0.0,
"description": "Specify the fragment bin offset to be used for the comet database search.",
"help_text": "For High-Resolution instruments a fragment bin offset of 0 is recommended. (See the Comet parameter documentation: eg. 0)"
},
@@ -222,7 +222,7 @@
"spectrum_batch_size": {
"type": "integer",
"fa_icon": "fas fa-wave-sine",
- "default": 500,
+ "default": 0,
"description": "Size of Spectrum batch for Comet processing (Decrease/Increase depending on Memory Availability)"
},
"vcf_sheet": {
@@ -237,6 +237,12 @@
"default": "false",
"fa_icon": "fas fa-tags",
"description": "Set this option to create documents that are created to perform the ion annotation"
+ },
+ "instrument": {
+ "type": "string",
+ "default": "high_res",
+ "fa_icon": "fas fa-wrench",
+ "description": "Comets theoretical_fragment_ions parameter: theoretical fragment ion peak representation, high-res: sum of intensities plus flanking bins, ion trap (low-res) ms/ms: sum of intensities of central M bin only"
}
}
},
@@ -252,7 +258,7 @@
"fa_icon": "fas fa-rectangle-code",
"default": "peptide_level_fdrs",
"description": "Specify the level at which the false discovery rate should be computed.",
- "enum": ["peptide_level_fdrs", "psm-level-fdrs", "protein_level_fdrs"]
+ "enum": ["peptide_level_fdrs", "psm_level_fdrs", "protein_level_fdrs"]
},
"fdr_threshold": {
"type": "number",
@@ -498,6 +504,12 @@
"fa_icon": "fas fa-question-circle",
"hidden": true
},
+ "version": {
+ "type": "boolean",
+ "description": "Display version and exit.",
+ "fa_icon": "fas fa-question-circle",
+ "hidden": true
+ },
"publish_dir_mode": {
"type": "string",
"default": "copy",
@@ -539,7 +551,7 @@
"type": "string",
"description": "Incoming hook URL for messaging service",
"fa_icon": "fas fa-people-group",
- "help_text": "Incoming hook URL for messaging service. Currently, only MS Teams is supported.",
+ "help_text": "Incoming hook URL for messaging service. Currently, MS Teams and Slack are supported.",
"hidden": true
},
"multiqc_config": {
@@ -579,12 +591,6 @@
"description": "Show all params when using `--help`",
"hidden": true,
"help_text": "By default, parameters set as _hidden_ in the schema are not shown on the command line when a user runs with `--help`. Specifying this option will tell the pipeline to show all parameters."
- },
- "enable_conda": {
- "type": "boolean",
- "description": "Run this workflow with Conda. You can also use '-profile conda' instead of providing this parameter.",
- "hidden": true,
- "fa_icon": "fas fa-bacon"
}
}
}
diff --git a/workflows/mhcquant.nf b/workflows/mhcquant.nf
index 0fe12160..a32e0063 100644
--- a/workflows/mhcquant.nf
+++ b/workflows/mhcquant.nf
@@ -225,8 +225,7 @@ workflow MHCQUANT {
// Return an error message when there is only a header present in the document
OPENMS_TEXTEXPORTER_FDR.out.tsv.map {
meta, tsv -> if (tsv.size() < 130) {
- log.error "It seems that there were no significant hits found for one or more samples.\nPlease consider incrementing the '--fdr_threshold' after removing the work directory or to exclude this sample."
- exit(0)
+ log.warn "It seems that there were no significant hits found for this sample: " + meta.sample + "\nPlease consider incrementing the '--fdr_threshold' after removing the work directory or to exclude this sample. "
}
}
@@ -362,7 +361,7 @@ workflow.onComplete {
}
NfcoreTemplate.summary(workflow, params, log)
if (params.hook_url) {
- NfcoreTemplate.adaptivecard(workflow, params, summary_params, projectDir, log)
+ NfcoreTemplate.IM_notification(workflow, params, summary_params, projectDir, log)
}
}