diff --git a/config/bianca/config.hg19.yaml b/config/bianca/config.hg19.yaml
deleted file mode 100644
index 18b3ded8..00000000
--- a/config/bianca/config.hg19.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
-resources: "config/bianca/resources.yaml"
-singularity_schema: "config/bianca/singularity.schema.yaml"
-
-hydra_local_path: "PATH_TO_REPO"
-
-reference:
- fasta_rna: "/sw/data/CTAT_RESOURCE_LIB/2021-03/GRCh37_gencode_v19_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/ref_genome.fa"
-
-
-fusioncatcher:
- genome_path: "/sw/bioinfo/FusionCatcher/1.33/bianca/fusioncatcher/data/human_v102/"
-
-gatk_collect_allelic_counts:
- SNP_interval: "/PROJECT_DATA/cnv/gnomad_SNP_0.001_target.annotated.interval_list"
-
-gatk_denoise_read_counts:
- normal_reference: "/PROJECT_DATA/PoN/gatk_cnv_nextseq_36.hdf5"
-
-gatk_get_pileup_summaries:
- sites: "/PROJECT_DATA/ref_data/GNOMAD/small_exac_common_3.hg19.vcf.gz"
- variants: "/PROJECT_DATA/ref_data/GNOMAD/small_exac_common_3.hg19.vcf.gz"
-
-gene_fuse:
- genes: "/PROJECT_DATA/gene_fuse/GMS560_fusion_w_pool2.hg19.221117.csv"
- fasta: "/PROJECT_DATA/ref_data/hg19/hg19.with.mt.fasta"
-
-filter_vcf:
- snv_soft_filter: "config/filters/config_soft_filter_uppsala_vep107.yaml"
- snv_hard_filter: "config/filters/config_hard_filter_uppsala_vep107.yaml"
- germline: "config/filters/config_hard_filter_germline_vep107.yaml"
-
-hotspot_annotation:
- hotspots: "/PROJECT_DATA/design/Hotspots_combined_regions_nodups.csv"
-
-hotspot_info:
- hotspot_mutations: "/PROJECT_DATA/design/Hotspots_combined_regions_nodups.csv"
-
-hotspot_report:
- hotspot_mutations: "/PROJECT_DATA/design/Hotspots_combined_regions_nodups.csv"
-
-manta_config_t:
- extra: "--exome --callRegions /PROJECT_DATA/design/pool1_pool2.sort.merged.padded20.cnv200.hg19.split_fusion_genes.210608.bed.gz"
-
-mosdepth:
- extra: "--no-per-base --fast-mode"
-
-msisensor_pro:
- PoN: "/PROJECT_DATA/PoN/Msisensor_pro_reference_nextseq_36.list_baseline"
-
-purecn:
- extra: "--model betabin --mapping-bias-file /PROJECT_DATA/purecn/mapping_bias_nextseq_27_hg19.rds"
- normaldb: "/PROJECT_DATA/purecn/normalDB_nextseq_27_hg19.rds"
- intervals: "/PROJECT_DATA/purecn/targets_twist-gms-st_hg19_25000_intervals.txt"
-
-purecn_coverage:
- intervals: "/PROJECT_DATA/purecn/targets_twist-gms-st_hg19_25000_intervals.txt"
-
-report_fusions:
- annotation_bed: "/PROJECT_DATA/rna_fusion/Twist_RNA_fusionpartners.bed"
-
-report_gene_fuse:
- filter_fusions: "/PROJECT_DATA/gene_fuse/filter_fusions_20230214.csv"
-
-star:
- genome_index: "/sw/data/reference/Homo_sapiens/hg19/program_files/star/concat"
- extra: "--quantMode GeneCounts --sjdbGTFfile /sw/data/reference/Homo_sapiens/GRCh37/program_files/lifescope/refGene/refGene.hg19.20101221.gtf --outSAMtype BAM SortedByCoordinate --chimSegmentMin 10 --chimOutType WithinBAM SoftClip --chimJunctionOverhangMin 10 --chimScoreMin 1 --chimScoreDropMax 30 --chimScoreJunctionNonGTAG 0 --chimScoreSeparation 1 --alignSJstitchMismatchNmax 5 -1 5 5 --chimSegmentReadGapMax 3"
-
-star_fusion:
- genome_path: "/sw/data/CTAT_RESOURCE_LIB/2021-03/GRCh37_gencode_v19_CTAT_lib_Mar012021.plug-n-play/ctat_genome_lib_build_dir/"
-
-svdb_query:
- db_string: "--db /PROJECT_DATA/SVDB/all_TN_292_svdb_0.8_20220505.vcf --out_frq Twist_AF --out_occ Twist_OCC"
-
-vep:
- vep_cache: "/sw/data/vep/107"
\ No newline at end of file
diff --git a/config/bianca/config.yaml b/config/bianca/config.yaml
new file mode 100644
index 00000000..39afa78b
--- /dev/null
+++ b/config/bianca/config.yaml
@@ -0,0 +1,2 @@
+resources: "config/bianca/resources.yaml"
+singularity_schema: "config/bianca/singularity.schema.yaml"
\ No newline at end of file
diff --git a/docs/run_on_bianca.md b/docs/run_on_bianca.md
index 1296d1dc..0d2d4057 100644
--- a/docs/run_on_bianca.md
+++ b/docs/run_on_bianca.md
@@ -1,211 +1,14 @@
-Bianca is a cluster without access to internet which causes some problems for pipelines relying on resources found online. But it's easy to handle=)
+Follow the instructions in [Running the pipeline -> Closed system](run_on_closed_env.md).
-# Preperations
+# Setup environment
-```bash
-# Set Twist Solid version
-TAG_OR_BRANCH="v0.6.1"
-
-# Clone selected version
-git clone --branch ${VERSION} https://github.com/genomic-medicine-sweden/Twist_Solid.git
-cd Twist_Solid
-virtualenv venv && source venv/bin/activate
-pip install -r requirements.txt
-```
-
-## Fetach resources
-
-### Download reference files
-
-```bash
-# NextSeq
- hydra-genetics references download -o design_and_ref_files -v config/references/design_files.hg19.yaml -v config/references/nextseq.hg19.pon.yaml -v config/references/references.hg19.yaml
-
- #NovaSeq, not all files are prepare for novaseq
- hydra-genetics references download -o design_and_ref_files -v config/references/design_files.hg19.yaml -v config/references/novaseq.hg19.pon.yaml -v config/references/references.hg19.yaml
-```
-
-## Download Containers
-```bash
-# NOTE: singularity command need to be available for this step
-hydra-genetics prepare-environment create-singularity-files -c config/config.yaml -o singularity_cache
-```
-
-## Environment
-
-Create an environment, on a computer/server with access to internet, that can be moved to bianca.
-
-### Conda version
-
-Requires:
-
- - conda
-
-```bash
-# Build compressed file containing, named Twist_Solid_{TAG_OR_BRANCH}.tar.gz
-# - Twist Solid Pipeline
-# - snakemake-wrappers
-# - hydra-genetics modules
-# - conda env
-TAG_OR_BRANCH="vX.Y.X" bash build/build_conda.sh
-```
-
-
-### Singularity version
-Requires:
-
- - docker
- - singularity
-
-```bash
-# Build compressed file containing
-# - Twist Solid Pipeline
-# - snakemake-wrappers
-# - hydra-genetics modules
-# - conda env
-TAG_OR_BRANCH="vX.Y.X" bash build/build_container.sh
-```
-
----
-
-# Bianca
-The following file/folders need to be uploaded to bianca, with sftp:
-
-1. design_and_ref_files
-2. Twist_Solid_{TAG_OR_BRANCH}.tar.gz: for conda
-3. Twist_Solid_{TAG_OR_BRANCH}.sif: for singularity
-3. singularity_cache
-
----
-
-# On bianca
-
-## Setup environment
-### Conda
-
-#### Unpack environment and activate
-```bash
-# Extract tar.
-TAG_OR_BRANCH=develop
-tar -xvf Twist_Solid_${TAG_OR_BRANCH}.tar.gz
-cd Twist_Solid_{TAG_OR_BRANCH}
-mkdir venv && tar xvf env.tar.gz -C venv/
-source venv/bin/activate
-
-# Variable that will be used lated
-PATH_TO_ENV=${PWD}
-PATH_TO_HYDRA_MODULES=${PATH_TO_EXTRACTED_TAR_GZ}/hydra-genetics
-```
-
-#### Create config and profile
-
-*config files*
-
-```bash
-PATH_TO_design_and_ref_files="PATH_TO_UPLOADED_DESIGN_AND_REF_FILES"
-PATH_TO_singularity_cache="PATH_TO_UPLOADED_SINGULARITY_CACHE"
-# Conda environment still need to be active
-# Prepare config
-cd $PATH_TO_EXTRACTED_TAR_GZ/Twist_Solid
-cp config/config.yaml config/config.yaml.copy
-cp config/bianca/config.hg19.yaml config/bianca/config.hg19.yaml.copy
-
-# Update design and ref files location and hydra-genetics module location
-# Update hydra module location
-# Make sure the environment still is active
-hydra-genetics prepare-environment reference-path-update -c config/bianca/config.hg19.yaml.copy -n config/bianca/config.hg19.yaml --reference-path /PROJECT_DATA:${PATH_TO_design_and_ref_files} --reference-path PATH_TO_REPO:${PATH_TO_HYDRA_MODULES}
-
-# Make use of local singularities: ex /proj/sens2022566/nobackup/patriksm/singularity_cache
-hydra-genetics prepare-environment container-path-update -c config/config.yaml.copy -n config/config.yaml -p ${PATH_TO_singularity_cache}
-
-# Ex: updating configs
-# - ref files att /proj/sens2022566/nobackup/patriksm/design_and_ref_files
-# - module at /proj/sens2022566/nobackup/patriksm/Twist_Solid_{TAG_OR_BRANCH}/hydra-genetics
-# command: hydra-genetics prepare-environment reference-path-update -c config.bianca.hg19.yaml.copy -n config.bianca.hg19.v1.yaml --reference-path /PROJECT_DATA:/proj/sens2022566/nobackup/patriksm/design_and_ref_files --reference-path PATH_TO_REPO:/proj/sens2022566/nobackup/patriksm/Twist_Solid_add-validation-ref-yaml/hydra-genetics
-# - singularity cache at
-# command: hydra-genetics prepare-environment container-path-update -c config.bianca.hg19.v1.yaml -n config.bianca.hg19.v2.yaml -p /proj/sens2022566/nobackup/patriksm/singularity_cache
-```
-
-*profile*
-
-Edit bianca profile Twist_Solid_${TAG_OR_BRANCH}/Twist_Solid/profiles/bianca/config.yaml
-```yaml
-# Found at Twist_Solid_{TAG_OR_BRANCH}/snakemake-wrappers, use absolute_path with 'git+file:/'
-wrapper-prefix="PATH_TO_WRAPPERS"
-# ex: wrapper-prefix="git+file://proj/sens2022566/nobackup/patriksm/Twist_Solid_add-{TAG_OR_BRANCH}/snakemake-wrappers/"
-
-# Update account info, change ADD_YOUR_ACCOUNT to your bianca project id
-drmaa: " -A ADD_YOUR_ACCOUNT -N 1-1 -t {resources.time} -n {resources.threads} --mem={resources.mem_mb} --mem-per-cpu={resources.mem_per_cpu} --mem-per-cpu={resources.mem_per_cpu} --partition={resources.partition} -J {rule} -e slurm_out/{rule}_%j.err -o slurm_out/{rule}_%j.out"
-```
-
-#### Validate config files
+1. Copy the updated config folder to the working directory
+2. Update the bianca profile to match your project
+3. Create input files using `hydra-genetics create-input-files`
+4. Run the pipeline (a minimal sketch of these steps follows below)
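+
+A minimal sketch of these steps, assuming the folder layout shown under "Example setup" below; all paths and the fastq location are placeholders for your own project:
+
+```bash
+# Illustrative only -- adjust the paths to your own project
+cd /proj/sensYYYXXX/nobackup/username/analysis
+
+# 1. Copy the updated config folder
+cp -r ../Twist_Solid_env/Twist_Solid/config .
+
+# 2. Edit the profile (account, wrapper-prefix, partition)
+#    ../Twist_Solid_env/Twist_Solid/profiles/bianca/config.yaml
+
+# 3. Create samples.tsv and units.tsv
+hydra-genetics create-input-files -d PATH_TO_FASTQ_FILES
+
+# 4. Run the pipeline
+snakemake -s ../Twist_Solid_env/Twist_Solid/workflow/Snakefile --profile ../Twist_Solid_env/Twist_Solid/profiles/bianca
+```
+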
-```bash
-# This will make sure that all design and reference files exists and haven't changed
-# Warnings for possible file PATH/hydra-genetics and missing tbi files in config can be ignored
-hydra-genetics --debug references validate -c config/config.yaml -c config/bianca/config.hg19.yaml -v config/references/design_files.hg19.yaml -v config/references/references.bianca.hg19.yaml -v config/references/nextseq.hg19.pon.yaml -v config/references/references.hg19.yaml -p ${PATH_TO_design_and_ref_files}
-```
-
-### Singularity
-
-#### Setup env, snakemake-wrappers and Hydra-Genetics modules
-
-```bash
-# Extract tar.
-singularity run --app copy-twist-solid-env twist_solid_add-validation-ref-yaml.sif Pipeline
-singularity run --app copy-twist-pipeline twist_solid_add-validation-ref-yaml.sif Pipeline
-singularity run --app copy-twist-solid-env twist_solid_add-validation-ref-yaml.sif Pipeline
-singularity run --app copy-hydra-modules twist_solid_add-validation-ref-yaml.sif Pipeline
-singularity run --app copy-snakemake-wrappers twist_solid_add-validation-ref-yaml.sif Pipeline
-
-source Pipeline/twist_solid_venv/bin/activate
-
-```
-
-#### Create config and profile
-```bash
-# Create config folder with updated paths
-singularity run --app create-config-folder-bianca twist_solid.sif /PATH/singularity_cache /PATH/design_and_ref_files /PATH/Pipeline
-
-# Create profile with project id and path to snakemake-wrappers
-singularity run --app create-profile-bianca twist_solid_add-validation-ref-yaml.sif sensXXXX /PATH/Pipeline/snakemake-wrappers
-
-```
-
-#### Validate config
-```bash
-singularity run --app validate-config-hg19-bianca twist_solid_add-validation-ref-yaml.sif -c /PATH_TO_UPDATE/config/config.yaml -c /PATH_TO_UPDATE/config/bianca/config.hg19.yaml -p /proj/sensXXXX/nobackup/USER/design_and_ref_files
-```
-
-## Run Pipeline
-
-```bash
-# Create analysis
-mkdir analysis
-# Enter folder
-cd analysis
-# Copy config files
-cp -r PATH_TO_UPDATED_CONFIGS/config .
-
-# Create samples.tsv and units.tsv
-```
-
-### Manually
-
-```bash
-module load slurm-drmaa
-
-# Enter runfolder
-cd PATH_TO_ANALYSIS_FOLDER
-
-# For conda
-source /{PATH_TO_ENV}/venv/bin/activate
-snakemake -s /{PATH_TO_PIPELINE}/Twist_Solid/workflow/Snakefile --profile ${PATH_TO_UPDATED_PROFILE}/Twist_Solid/profiles/bianca
-
-# Note that bianca may close your session before the workflow is done
-```
-
-### SBATCH
+It is recommended to run your job using a SLURM batch script, since the login node may close your session due to inactivity.
+
+# SBATCH
*run_pipeline.sh*
```bash
@@ -230,4 +33,54 @@ source ${PATH_TO_EXTRACTED_CONDA_ENV}/venv/bin/activate
snakemake -s ${PATH_TO_FOLDER_WITH_PIPELINE}/Twist_Solid/workflow/Snakefile --profile ${PATH_TO_UPDATED_PROFILE}/bianca
+```
+
+# Example setup
+
+## Folder structure
+
+```bash
+/proj/sensYYYXXX/nobackup/username
+|---design_and_ref_files
+| |---GMS560
+| | |---Artifact/
+| | |---Background/
+| | |---design/
+| | |---PoN/
+|---ref_data
+|---Twist_Solid_env
+| |---venv/                # conda env
+| |---hydra-genetics/      # Modules
+| |---snakemake-wrappers/  # Wrappers
+| |---Twist_Solid/         # Pipeline
+| | |---profiles/bianca/   # Update profile
+| | |---config/            # Updated config
+| | |---workflow/
+| | | |---Snakefile
+|---analysis
+| |---samples.tsv
+| |---units.tsv
+| |---config/              # Copied config
+```
+
+Point to the uploaded reference files:
+```yaml
+# config/config.data.hg19.yaml
+# Update the following lines:
+PROJECT_DESIGN_DATA: "/proj/sensYYYXXX/nobackup/username/design_and_ref_files"
+PROJECT_PON_DATA: "/proj/sensYYYXXX/nobackup/username/design_and_ref_files"
+PROJECT_REF_DATA: "/proj/sensYYYXXX/nobackup/username/design_and_ref_files"
+```
+
+**config.yaml**
+```yaml
+# Update the following line
+hydra_local_path: "/proj/sensYYYXXX/nobackup/username/Twist_Solid_env/hydra-genetics"
+```
+
+**profiles/bianca/config.yaml**
+```yaml
+wrapper-prefix="/proj/sensYYYXXX/nobackup/username/Twist_Solid_env/snakemake-wrappers"
+
+drmaa: " -A sensYYYXXX -N 1-1 -t {resources.time} -n {resources.threads} --mem={resources.mem_mb} --mem-per-cpu={resources.mem_per_cpu} --mem-per-cpu={resources.mem_per_cpu} --partition={resources.partition} -J {rule} -e slurm_out/{rule}_%j.err -o slurm_out/{rule}_%j.out"
```
\ No newline at end of file
diff --git a/docs/run_on_closed_env.md b/docs/run_on_closed_env.md
new file mode 100644
index 00000000..d8a8089c
--- /dev/null
+++ b/docs/run_on_closed_env.md
@@ -0,0 +1,167 @@
+# Preparations
+Fetch the pipeline and install the requirements:
+
+```bash
+# Set Twist Solid version
+TAG_OR_BRANCH="vX.Y.X"
+
+# Clone selected version
+git clone --branch ${TAG_OR_BRANCH} https://github.com/genomic-medicine-sweden/Twist_Solid.git
+cd Twist_Solid
+python3 -m venv venv && source venv/bin/activate
+pip install -r requirements.txt
+```
+
+## Fetch resources
+
+### Download reference files
+
+```bash
+# NextSeq
+hydra-genetics --debug references download -o design_and_ref_files -v config/references/design_files.hg19.yaml -v config/references/nextseq.hg19.pon.yaml -v config/references/references.hg19.yaml
+
+# NovaSeq, not all files are prepared for NovaSeq yet
+hydra-genetics references download -o design_and_ref_files -v config/references/design_files.hg19.yaml -v config/references/novaseq.hg19.pon.yaml -v config/references/references.hg19.yaml
+
+# Compress data
+tar -czvf design_and_ref_files.tar.gz design_and_ref_files
+```
+
+## Download Containers
+```bash
+# NOTE: the singularity command needs to be available for this step
+hydra-genetics prepare-environment create-singularity-files -c config/config.yaml -o singularity_cache
+```
+
+## Environment
+
+Create an environment, on a computer/server with internet access, that can be moved to the closed system.
+
+Requires:
+
+ - conda
+
+```bash
+# Build a compressed file named Twist_Solid_{TAG_OR_BRANCH}.tar.gz containing:
+# - Twist Solid Pipeline
+# - snakemake-wrappers
+# - hydra-genetics modules
+# - conda env
+TAG_OR_BRANCH="vX.Y.X" bash build/build_conda.sh
+```
+
+---
+
+# Files/Folders
+The following files/folders have now been created and need to be moved to your server (an example transfer is sketched after the list):
+
+1. file: design_and_ref_files.tar.gz
+2. file: Twist_Solid_{TAG_OR_BRANCH}.tar.gz
+3. folder: singularity_cache
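+
+How the transfer might look, assuming sftp access to the closed system; the host name, user and transfer tool are placeholders that depend on your setup:
+
+```bash
+# Illustrative sftp session -- replace host and paths with your own
+sftp USER@SFTP_HOST_OF_CLOSED_SYSTEM
+put design_and_ref_files.tar.gz
+put Twist_Solid_{TAG_OR_BRANCH}.tar.gz
+put -r singularity_cache
+```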
+
+---
+
+# On Server
+
+## Setup environment
+
+
+### Unpack environment and activate
+```bash
+# Extract tar.
+TAG_OR_BRANCH="vX.Y.X"
+tar -xvf Twist_Solid_${TAG_OR_BRANCH}.tar.gz
+cd Twist_Solid_${TAG_OR_BRANCH}
+mkdir venv && tar xvf env.tar.gz -C venv/
+source venv/bin/activate
+
+# Variables that will be used later
+PATH_TO_ENV=${PWD}
+PATH_TO_HYDRA_MODULES=${PWD}/hydra-genetics
+PATH_TO_FOLDER_WITH_PIPELINE=${PWD}/Twist_Solid
+```
+
+### Decompress reference files
+
+```bash
+tar -xvf design_and_ref_files.tar.gz
+```
+
+### Singularities
+Move the singularity cache to an appropriate location.
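+
+A minimal sketch, assuming the cache was uploaded next to the extracted environment; the target path and the ```PATH_TO_singularity_cache``` variable used further down are placeholders you choose yourself:
+
+```bash
+# Illustrative only -- pick any location that your analyses can read from
+mv singularity_cache /PATH_TO_STORAGE/singularity_cache
+PATH_TO_singularity_cache=/PATH_TO_STORAGE/singularity_cache
+```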
+
+## Modify config and profile
+
+### Resources
+
+Make sure that ```config/resources.yaml``` matches your system setup (see the sketch after this list), e.g.:
+ - partition
+ - number of cores
+ - memory
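+
+As an illustration, the default resources typically look something like this; the keys mirror the ```{resources.*}``` placeholders used in the profile's drmaa string, and the values shown are examples only:
+
+```yaml
+# config/resources.yaml -- illustrative values, adjust to your cluster
+default_resources:
+  threads: 1
+  time: "04:00:00"
+  mem_mb: 6144
+  mem_per_cpu: 6144
+  partition: "core"
+```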
+
+### config.data.hg19.yaml
+
+Point to the uploaded reference files:
+```yaml
+# config/config.data.hg19.yaml
+# Update the following lines:
+PROJECT_DESIGN_DATA: "{EXTRACT_PATH}/design_and_ref_files"
+PROJECT_PON_DATA: "{EXTRACT_PATH}/design_and_ref_files"
+PROJECT_REF_DATA: "{EXTRACT_PATH}/design_and_ref_files"
+```
+
+### config.yaml
+
+Set the path to the hydra-genetics modules:
+```yaml
+# Update the following line
+hydra_local_path: "{PATH_TO_EXTRACTED_ENV}/hydra-genetics"
+```
+
+Add the path to the local singularities:
+```bash
+# config/config.yaml
+# Make sure the environment is active
+cp config/config.yaml config/config.yaml.copy
+hydra-genetics prepare-environment container-path-update -c config/config.yaml.copy -n config/config.yaml -p ${PATH_TO_singularity_cache}
+```
+
+### Profile
+
+Copy a profile and modify it to match your system, e.g. ```Twist_Solid_${TAG_OR_BRANCH}/Twist_Solid/profiles/bianca/config.yaml```
+```yaml
+# Found at Twist_Solid_{TAG_OR_BRANCH}/snakemake-wrappers, use an absolute path prefixed with 'git+file://'
+wrapper-prefix: "PATH_TO_WRAPPERS"
+# ex: wrapper-prefix="git+file://proj/sens2022566/nobackup/patriksm/Twist_Solid_add-{TAG_OR_BRANCH}/snakemake-wrappers/"
+
+# Update account info, change ADD_YOUR_ACCOUNT to your bianca project id
+drmaa: " -A ADD_YOUR_ACCOUNT -N 1-1 -t {resources.time} -n {resources.threads} --mem={resources.mem_mb} --mem-per-cpu={resources.mem_per_cpu} --mem-per-cpu={resources.mem_per_cpu} --partition={resources.partition} -J {rule} -e slurm_out/{rule}_%j.err -o slurm_out/{rule}_%j.out"
+```
+
+## Validate config files
+
+```bash
+# This will make sure that all design and reference files exist and haven't changed
+# Warnings about a possible file PATH/hydra-genetics and about missing tbi files in the config can be ignored
+hydra-genetics --debug references validate -c config/config.yaml -c config/config.data.hg19.yaml -v config/references/design_files.hg19.yaml -v config/references/nextseq.hg19.pon.yaml -v config/references/references.hg19.yaml -p ${PATH_TO_design_and_ref_files}
+```
+
+## Run Pipeline
+
+```bash
+# Create analysis
+mkdir analysis
+# Enter folder
+cd analysis
+# Copy config files
+cp -r PATH_TO_UPDATED_CONFIGS/config .
+
+# Create samples.tsv and units.tsv
+# https://hydra-genetics.readthedocs.io/en/latest/create_sample_files/
+# remember to update tumor content value (TC) in samples.tsv for DNA samples
+hydra-genetics create-input-files -d PATH_TO_FASTQ_FILE -p NovaSeq6000 -a AGATCGGAAGAGCACACGTCTGAACTCCAGTCA,AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGT
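+
+# Illustrative samples.tsv layout (tab-separated; the exact columns are defined by the pipeline's sample schema):
+#   sample     TC
+#   sample1    0.6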
+
+# Make sure slurm-drmaa is available
+source ${PATH_TO_ENV}/venv/bin/activate
+snakemake -s ${PATH_TO_FOLDER_WITH_PIPELINE}/workflow/Snakefile --profile ${PATH_TO_FOLDER_WITH_PIPELINE}/profiles/bianca
+```
\ No newline at end of file
diff --git a/docs/running.md b/docs/running.md
index df8b409a..60722b9b 100644
--- a/docs/running.md
+++ b/docs/running.md
@@ -70,16 +70,16 @@ pip install -r requirements.txt
# make sure that TMPDIR points to a location with a lot of storage, it
# will be required to fetch reference data
# export TMPDIR=/PATH_TO_STORAGE
-hydra-genetics --verbose references download -o design_and_ref_files -v config/references/references.hg19.yaml -v config/references/design_files.hg19.yaml -v config/references/nextseq.hg19.pon.yaml
+hydra-genetics --debug --verbose references download -o design_and_ref_files -v config/references/references.hg19.yaml -v config/references/design_files.hg19.yaml -v config/references/nextseq.hg19.pon.yaml
```
**Update config**
```yaml
# file config/config.data.hg19.yaml
# change rows:
-PROJECT_DESIGN_DATA: "" # parent folder for GMS560 design, ex GMS560/design
-PROJECT_PON_DATA: "" # artifact/background/PoN, ex GMS560/PoN
-PROJECT_REF_DATA: "" # parent folder for ref_data, ex ref_data/hg19
+PROJECT_DESIGN_DATA: "PATH_TO/design_and_ref_files" # parent folder for GMS560 design, ex GMS560/design
+PROJECT_PON_DATA: "PATH_TO/design_and_ref_files" # artifact/background/PoN, ex GMS560/PoN
+PROJECT_REF_DATA: "PATH_TO/design_and_ref_files" # parent folder for ref_data, ex ref_data/hg19
```
@@ -92,7 +92,7 @@ hydra-genetics create-input-files -d path/to/fastq-files/
## Run command
Using the activated python virtual environment created above, this is a basic command for running the pipeline:
```bash
-snakemake --profile profiles/ -s workflow/Snakefile
+snakemake --profile profiles/NAME_OF_PROFILE -s workflow/Snakefile
```
The are many additional [snakemake running options](https://snakemake.readthedocs.io/en/stable/executing/cli.html#) some of which is listed below. However, options that are always used should be put in the [profile](https://hydra-genetics.readthedocs.io/en/latest/profile/).
diff --git a/mkdocs.yml b/mkdocs.yml
index cc4b37b3..3022696a 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -8,6 +8,7 @@ nav:
- Reference files: references.md
- Running the pipeline:
- Basic: running.md
+ - Closed system: run_on_closed_env.md
- Bianca: run_on_bianca.md
- DNA Pipeline:
- Prealignment: dna_prealignment.md
diff --git a/profiles/bianca/config.yaml b/profiles/bianca/config.yaml
index fa25c2b8..7472135e 100644
--- a/profiles/bianca/config.yaml
+++ b/profiles/bianca/config.yaml
@@ -3,7 +3,7 @@ keep-going: True
restart-times: 2
rerun-incomplete: True
use-singularity: True
-configfiles: ["config/config.yaml", "config/bianca/config.hg19.yaml"]
+configfiles: ["config/config.yaml", "config/config.hg19.yaml", "config/bianca/config.yaml"]
singularity-args: "-e --cleanenv -B /sw -B /proj -B /storage -B /castor "
wrapper-prefix: "PATH_TO_WRAPPERS"
drmaa: " -A ADD_YOUR_ACCOUNT -N 1-1 -t {resources.time} -n {resources.threads} --mem={resources.mem_mb} --mem-per-cpu={resources.mem_per_cpu} --mem-per-cpu={resources.mem_per_cpu} --partition={resources.partition} -J {rule} -e slurm_out/{rule}_%j.err -o slurm_out/{rule}_%j.out"