diff --git a/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/blast_with_allinea_performance_reports.md b/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/blast_with_allinea_performance_reports.md
index ec4291ed4d4fb318fac1d71327279306107d2bb2..d0a891d8fd37be998fc92e316c33fd957e46b7fa 100644
--- a/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/blast_with_allinea_performance_reports.md
+++ b/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/blast_with_allinea_performance_reports.md
@@ -9,7 +9,7 @@ with Allinea Performance Reports (`perf-report`) on Crane is shown below:
 
 {{% panel theme="info" header="blastn_perf_report.submit" %}}
 {{< highlight batch >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=BlastN
 #SBATCH --nodes=1
 #SBATCH --ntasks=16
diff --git a/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/lammps_with_allinea_performance_reports.md b/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/lammps_with_allinea_performance_reports.md
index cdb774983281fba161e0244c1496bd0b8dfd49e6..eae0db2a90c70697ea3a16bc610b05ebf62ad412 100644
--- a/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/lammps_with_allinea_performance_reports.md
+++ b/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/lammps_with_allinea_performance_reports.md
@@ -9,7 +9,7 @@ below:
 
 {{% panel theme="info" header="lammps_perf_report.submit" %}}
 {{< highlight batch >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=LAMMPS
 #SBATCH --ntasks=64
 #SBATCH --time=12:00:00
diff --git a/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/ray_with_allinea_performance_reports.md b/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/ray_with_allinea_performance_reports.md
index 28b25aa5027206c22b448fe4544288dd6ee91630..1a21e017817b79a4d2a478c6774169b7a08a8a86 100644
--- a/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/ray_with_allinea_performance_reports.md
+++ b/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/ray_with_allinea_performance_reports.md
@@ -8,7 +8,7 @@ with Allinea PerformanceReports (`perf-report`) is shown below:
 
 {{% panel theme="info" header="ray_perf_report.submit" %}}
 {{< highlight batch >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=Ray
 #SBATCH --ntasks-per-node=16
 #SBATCH --time=10:00:00
diff --git a/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/create_local_blast_database.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/create_local_blast_database.md
index 4caf0d48eced0f41236b698dbb744b15f4b29709..d01dfee4efef219f8eef827a38c7d688773748d7 100644
--- a/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/create_local_blast_database.md
+++ b/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/create_local_blast_database.md
@@ -15,7 +15,7 @@ where **input_reads.fasta** is the input file containing all sequences that need
 Simple example of how **makeblastdb** can be run on Crane using SLURM script and nucleotide database is shown below:
 {{% panel header="`blast_db.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=Blast_DB
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
diff --git a/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/running_blast_alignment.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/running_blast_alignment.md
index 9f8f603efd81ad53bfba8b46a01480dbc91cdb49..4c2ffc08a933dddfb5861fd53fe942bb50220221 100644
--- a/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/running_blast_alignment.md
+++ b/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/running_blast_alignment.md
@@ -52,7 +52,7 @@ Basic SLURM example of nucleotide BLAST run against the non-redundant **nt** BL
 
 {{% panel header="`blastn_alignment.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=BlastN
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=8
@@ -92,7 +92,7 @@ Basic SLURM example of protein BLAST run against the non-redundant **nr **BLAS
 
 {{% panel header="`blastx_alignment.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=BlastX
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=8
diff --git a/content/applications/app_specific/bioinformatics_tools/alignment_tools/blat.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/blat.md
index 9113bca24e2af8b1d905f63d87835b1ff65d4ac5..6d41e9c93483121dd0b213a4bfce3c77d47e3ab9 100644
--- a/content/applications/app_specific/bioinformatics_tools/alignment_tools/blat.md
+++ b/content/applications/app_specific/bioinformatics_tools/alignment_tools/blat.md
@@ -24,7 +24,7 @@ $ blat
 Running BLAT on Crane with query file `input_reads.fasta` and database `db.fa` is shown below:
 {{% panel header="`blat_alignment.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=Blat
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
diff --git a/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie.md
index 41aaf03dbb2c4177d1e9e0fbbf471b88fe9345e2..ae79a451468afd15f08c0c926c5b8dcf76e07d3c 100644
--- a/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie.md
+++ b/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie.md
@@ -28,7 +28,7 @@ Bowtie supports both single-end (`input_reads.[fasta|fastq]`) and paired-end (`
 An example of how to run Bowtie alignment on Crane with single-end fastq file and `8 CPUs` is shown below:
 {{% panel header="`bowtie_alignment.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=Bowtie
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=8
diff --git a/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie2.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie2.md
index 8d091433c32099f6cd7e05525f9f4b722adb7bae..f6054dbf87a9768c348adc99f64c9e507c77b1c2 100644
--- a/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie2.md
+++ b/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie2.md
@@ -34,7 +34,7 @@ where **index_prefix** is the generated index using the **bowtie2-build** co
 An example of how to run Bowtie2 local alignment on Crane with paired-end fasta files and `8 CPUs` is shown below:
 {{% panel header="`bowtie2_alignment.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=Bowtie2
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=8
diff --git a/content/applications/app_specific/bioinformatics_tools/alignment_tools/bwa/running_bwa_commands.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/bwa/running_bwa_commands.md
index 02ad668349887eea242ae8f31fe3c84a1cf803b6..7e96edbcf770fe804cbd6d8e6606c043dbba705d 100644
--- a/content/applications/app_specific/bioinformatics_tools/alignment_tools/bwa/running_bwa_commands.md
+++ b/content/applications/app_specific/bioinformatics_tools/alignment_tools/bwa/running_bwa_commands.md
@@ -25,7 +25,7 @@ where **index_prefix** is the index for the reference genome generated from **bw
 Simple SLURM script for running **bwa mem** on Crane with paired-end fastq input data, `index_prefix` as reference genome index, SAM output file and `8 CPUs` is shown below:
 {{% panel header="`bwa_mem.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=Bwa_Mem
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=8
diff --git a/content/applications/app_specific/bioinformatics_tools/alignment_tools/clustal_omega.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/clustal_omega.md
index 6aeebfae3f8913f7cb4f333b94701bad3eca6a41..95b625b2ec7f5198d57434ce2ae2472e9fc16e68 100644
--- a/content/applications/app_specific/bioinformatics_tools/alignment_tools/clustal_omega.md
+++ b/content/applications/app_specific/bioinformatics_tools/alignment_tools/clustal_omega.md
@@ -33,7 +33,7 @@ $ clustalo -h
 Running Clustal Omega on Crane with input file `input_reads.fasta` with `8 threads` and `10GB memory` is shown below:
 {{% panel header="`clustal_omega.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=Clustal_Omega
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=8
diff --git a/content/applications/app_specific/bioinformatics_tools/alignment_tools/tophat_tophat2.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/tophat_tophat2.md
index b80475689224d4b58f329dd4215c66f04b5d8f39..c0590f6a8a2d5994e10249e0c100afb762f58e0e 100644
--- a/content/applications/app_specific/bioinformatics_tools/alignment_tools/tophat_tophat2.md
+++ b/content/applications/app_specific/bioinformatics_tools/alignment_tools/tophat_tophat2.md
@@ -30,7 +30,7 @@ Prior running TopHat/TopHat2, an index from the reference genome should be built
 An example of how to run TopHat2 on Crane with paired-end fastq files `input_reads_pair_1.fastq` and `input_reads_pair_2.fastq`, reference index `index_prefix` and `8 CPUs` is shown below:
 {{% panel header="`tophat2_alignment.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=Tophat2
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=8
diff --git a/content/applications/app_specific/bioinformatics_tools/biodata_module.md b/content/applications/app_specific/bioinformatics_tools/biodata_module.md
index c4b7c0a3037fc24efbac7e684fff85f6fd40c061..9775036525e50b6dd0a0a25a2e64b288dc20c8e0 100644
--- a/content/applications/app_specific/bioinformatics_tools/biodata_module.md
+++ b/content/applications/app_specific/bioinformatics_tools/biodata_module.md
@@ -43,7 +43,7 @@ $ ls $BLAST
 An example of how to run Bowtie2 local alignment on Crane utilizing the default Horse, *Equus caballus* index (*BOWTIE2\_HORSE*) with paired-end fasta files and 8 CPUs is shown below:
 {{% panel header="`bowtie2_alignment.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=Bowtie2
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=8
@@ -64,7 +64,7 @@ bowtie2 -x $BOWTIE2_HORSE -f -1 input_reads_pair_1.fasta -2 input_reads_pair_2.f
 An example of BLAST run against the non-redundant nucleotide database available on Crane is provided below:
 {{% panel header="`blastn_alignment.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=BlastN
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=8
diff --git a/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/bamtools/running_bamtools_commands.md b/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/bamtools/running_bamtools_commands.md
index 07a0275202f5beb0fef6cd85f1d80366fb70cdf3..8cc866a115b4539431844243c82f6671b56b87f9 100644
--- a/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/bamtools/running_bamtools_commands.md
+++ b/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/bamtools/running_bamtools_commands.md
@@ -19,7 +19,7 @@ where the option **-format** specifies the type of the output file, **input_a
 Running BamTools **convert** on Crane with input file `input_alignments.bam` and output file `output_reads.fastq` is shown below:
 {{% panel header="`bamtools_convert.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=BamTools_Convert
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
diff --git a/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/samtools/running_samtools_commands.md b/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/samtools/running_samtools_commands.md
index ac5c097afed0a7b36182b6dba3f748027efd0fe9..9e84ab36c0b84aea6a07d15e78e7367e40b82858 100644
--- a/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/samtools/running_samtools_commands.md
+++ b/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/samtools/running_samtools_commands.md
@@ -17,7 +17,7 @@ where **input_alignments.[bam|sam]** is the input file with the alignments in BA
 Running **samtools view** on Crane with `8 CPUs`, input file `input_alignments.sam` with available header (**-S**), output in BAM format (**-b**) and output file `output_alignments.bam` is shown below:
 {{% panel header="`samtools_view.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=SAMtools_View
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=8
diff --git a/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/sratoolkit.md b/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/sratoolkit.md
index d2704b9f8f319a03e749e482c0a137dcd6edbe6d..250963fff0bec0979bea53250af124889cff769c 100644
--- a/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/sratoolkit.md
+++ b/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/sratoolkit.md
@@ -21,7 +21,7 @@ $ fastq-dump [options] input_reads.sra
 An example of running **fastq-dump** on Crane to convert SRA file containing paired-end reads is:
 {{% panel header="`sratoolkit.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=SRAtoolkit
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
diff --git a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/oases.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/oases.md
index 82278e8858fb1b26ea367756a8e1e98ab062e929..144583834436bb528ea8da9b98e42473fe6be4e9 100644
--- a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/oases.md
+++ b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/oases.md
@@ -28,7 +28,7 @@ Oases has a lot of parameters that can be found in its [manual](https://www.ebi.
 A simple SLURM script to run Oases on the Velvet output stored in `output_directory/` with minimum transcript length of `200` is shown below:
 {{% panel header="`oases.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=Velvet_Oases
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
diff --git a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/ray.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/ray.md
index 7e57db1f1d964c035c1cfc2cca6879170bf42cde..99d4f63be823a392599be042f4bda1a8a3fe7a78 100644
--- a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/ray.md
+++ b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/ray.md
@@ -41,7 +41,7 @@ Ray supports odd values for k-mer equal to or greater than 21 (`-k <kmer_value>`
 Simple SLURM script for running Ray with both paired-end and single-end data with `k-mer=31`, `8 CPUs` and `4 GB RAM per CPU` is shown below:
 {{% panel header="`ray.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=Ray
 #SBATCH --ntasks=8
 #SBATCH --time=168:00:00
diff --git a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/soapdenovo2.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/soapdenovo2.md
index d4248506b850803365f7207921a62ed533ddd7da..eae39ef874d29f673a650d2dc590ad6611f2585d 100644
--- a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/soapdenovo2.md
+++ b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/soapdenovo2.md
@@ -97,7 +97,7 @@ After creating the configuration file **configFile**, the next step is to run th
 Simple SLURM script for running SOAPdenovo2 with `k-mer=31`, `8 CPUSs` and `50GB of RAM` is shown below:
 {{% panel header="`soapdenovo2.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=SOAPdenovo2
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=8
diff --git a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/trinity/running_trinity_in_multiple_steps.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/trinity/running_trinity_in_multiple_steps.md
index 086a9ed4b3fc71fdaadf95fa86f370ed8dce7a4b..22fa8383adcd446328cf2e6a354bc5035e77ac13 100644
--- a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/trinity/running_trinity_in_multiple_steps.md
+++ b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/trinity/running_trinity_in_multiple_steps.md
@@ -10,7 +10,7 @@ weight = "10"
 The first step of running Trinity is to run Trinity with the option **--no_run_inchworm**:
 {{% panel header="`trinity_step1.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=Trinity_Step1
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=8
@@ -29,7 +29,7 @@ Trinity --seqType fq --max_memory 100G --left input_reads_pair_1.fastq --right i
 The second step of running Trinity is to run Trinity with the option **--no_run_chrysalis**:
 {{% panel header="`trinity_step2.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=Trinity_Step2
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=8
@@ -48,7 +48,7 @@ Trinity --seqType fq --max_memory 100G --left input_reads_pair_1.fastq --right i
 The third step of running Trinity is to run Trinity with the option **--no_distributed_trinity_exec**:
 {{% panel header="`trinity_step3.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=Trinity_Step3
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=8
@@ -67,7 +67,7 @@ Trinity --seqType fq --max_memory 100G --left input_reads_pair_1.fastq --right i
 The fourth step of running Trinity is to run Trinity without any additional option:
 {{% panel header="`trinity_step4.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=Trinity_Step4
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=8
diff --git a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_paired_end_data.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_paired_end_data.md
index 2f52cc8724f306cc870d700c088f6efcaa4f712e..6e6dc24cadcd6f40c7095065d458d56f1d95e12f 100644
--- a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_paired_end_data.md
+++ b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_paired_end_data.md
@@ -10,7 +10,7 @@ weight = "10"
 The first step of running Velvet is to run **velveth**:
 {{% panel header="`velveth.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=Velvet_Velveth
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=8
@@ -30,7 +30,7 @@ velveth output_directory/ 43 -fastq -longPaired -separate input_reads_pair_1.fas
 After running **velveth**, the next step is to run **velvetg** on the `output_directory/` and files generated from **velveth**:
 {{% panel header="`velvetg.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=Velvet_Velvetg
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=8
diff --git a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_single_end_and_paired_end_data.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_single_end_and_paired_end_data.md
index fa355b11a14273d787bb3eec9703c4d430e20ef8..187e020b00b1ef0d7a91047257821af548cacf67 100644
--- a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_single_end_and_paired_end_data.md
+++ b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_single_end_and_paired_end_data.md
@@ -10,7 +10,7 @@ weight = "10"
 The first step of running Velvet is to run **velveth**:
 {{% panel header="`velveth.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=Velvet_Velveth
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=8
@@ -30,7 +30,7 @@ velveth output_directory/ 51 -fasta -short input_reads.fasta -fasta -shortPaired
 After running **velveth**, the next step is to run **velvetg** on the `output_directory/` and files generated from **velveth**:
 {{% panel header="`velvetg.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=Velvet_Velvetg
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=8
diff --git a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_single_end_data.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_single_end_data.md
index 82efa872ca98a4925e16f0524fe98464130c1997..4528bc6c3ed5f54b0a4b6ba10203e4c3235108ed 100644
--- a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_single_end_data.md
+++ b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_single_end_data.md
@@ -10,7 +10,7 @@ weight = "10"
 The first step of running Velvet is to run **velveth**:
 {{% panel header="`velveth.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=Velvet_Velveth
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=8
@@ -30,7 +30,7 @@ velveth output_directory/ 31 -fasta -short input_reads.fasta
 After running **velveth**, the next step is to run **velvetg** on the `output_directory/` and files generated from **velveth**:
 {{% panel header="`velvetg.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=Velvet_Velvetg
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=8
diff --git a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/cutadapt.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/cutadapt.md
index 6f8770f0b8ab7d2323166d5cb172333ef3d6465b..d0e82b28ba24176240030950c1c94c71ec7517ab 100644
--- a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/cutadapt.md
+++ b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/cutadapt.md
@@ -25,7 +25,7 @@ $ cutadapt --help
 Simple Cutadapt script that trims the adapter sequences **AGGCACACAGGG** and **TGAGACACGCA** from the 3' end and **AACCGGTT** from the 5' end of single-end fasta input file is shown below:
 {{% panel header="`cutadapt.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=Cutadapt
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
diff --git a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/prinseq.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/prinseq.md
index 09d2fd44d21f6486c08993823090fe8c904eedb3..ea0ad94ee998bf9f7d81dda9ac7e335aa2f28dfc 100644
--- a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/prinseq.md
+++ b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/prinseq.md
@@ -27,7 +27,7 @@ The output format (`-out_format`) can be **1** (fasta only), **2** (fasta and qu
 Simple PRINSEQ SLURM script for single-end fasta data and fasta output format is shown below:
 {{% panel header="`prinseq_single_end.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=PRINSEQ
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
@@ -59,7 +59,7 @@ The output format (`-out_format`) can be **1** (fasta only), **2** (fasta and qu
 Simple PRINSEQ SLURM script for paired-end fastq data and fastq output format is shown below:
 {{% panel header="`prinseq_paired_end.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=PRINSEQ
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
diff --git a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/scythe.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/scythe.md
index e907b01363814051449e623e54dd8f48ed7a0bad..f19e2bb045d99c632a825ba57ea68c472409e347 100644
--- a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/scythe.md
+++ b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/scythe.md
@@ -25,7 +25,7 @@ $ scythe --help
 Simple Scythe script that uses the `illumina_adapters.fa` file and `input_reads.fastq` is shown below:
 {{% panel header="`scythe.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=Scythe
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
diff --git a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/sickle.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/sickle.md
index 7550b45007ddef8abed6be6d5a9ad2bc960ef5c4..537264059fd29e724e3abb81da521f2f04e09618 100644
--- a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/sickle.md
+++ b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/sickle.md
@@ -28,7 +28,7 @@ where **input_reads.fastq** is the input file of sequencing data in fastq form
 Simple SLURM Sickle script for Illumina single-end reads input file `input_reads.fastq` is shown below:
 {{% panel header="`sickle_single.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=Sickle
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
@@ -56,7 +56,7 @@ where **input_reads_pair_1.fastq** and **input_reads_pair_2.fastq** are the in
 Simple SLURM Sickle script for Sanger paired-end reads input files `input_reads_pair_1.fastq` and `input_reads_pair_2.fastq` is shown below:
 {{% panel header="`sickle_paired.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=Sickle
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
diff --git a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/tagcleaner.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/tagcleaner.md
index eeb9a893dfdf80cfe7493d2b8991118592f4bcd5..6619c5a74d1d71531b2af79ea928da76b30a8a77 100644
--- a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/tagcleaner.md
+++ b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/tagcleaner.md
@@ -25,7 +25,7 @@ $ tagcleaner.pl --help
 Simple TagCleaner script for removing known 3' and 5' tag sequences (`NNNCCAAACACACCCAACACA` and `TGTGTTGGGTGTGTTTGGNNN` respectively) is shown below:
 {{% panel header="`tagcleaner.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=TagCleaner
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
diff --git a/content/applications/app_specific/bioinformatics_tools/qiime.md b/content/applications/app_specific/bioinformatics_tools/qiime.md
index dfec13f048f5f9e32661e78e3c07f2daf6f42ac2..8d3e2c54a475456e4b3c37a1e75910f8b2358e19 100644
--- a/content/applications/app_specific/bioinformatics_tools/qiime.md
+++ b/content/applications/app_specific/bioinformatics_tools/qiime.md
@@ -35,7 +35,7 @@ Sample QIIME submit script to run **pick_open_reference_otus.py** is:
 
 {{% panel header="`qiime.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=QIIME
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=8
diff --git a/content/applications/app_specific/bioinformatics_tools/reference_based_assembly_tools/cufflinks.md b/content/applications/app_specific/bioinformatics_tools/reference_based_assembly_tools/cufflinks.md
index 711471e185605e7611dcd1f04abacc6fb65d896f..b48c94f64c0e32bd1c291f4b72e01c9b48a2fa80 100644
--- a/content/applications/app_specific/bioinformatics_tools/reference_based_assembly_tools/cufflinks.md
+++ b/content/applications/app_specific/bioinformatics_tools/reference_based_assembly_tools/cufflinks.md
@@ -22,7 +22,7 @@ $ cufflinks -h
 An example of how to run Cufflinks on Crane with alignment file in SAM format, output directory `cufflinks_output` and 8 CPUs is shown below:
 {{% panel header="`cufflinks.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=Cufflinks
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=8
diff --git a/content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/cap3.md b/content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/cap3.md
index c3f17b64c60076f931dbf0a725c393e392da002e..0bc7928e03000ef8f0963e2102c87631d382a6d5 100644
--- a/content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/cap3.md
+++ b/content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/cap3.md
@@ -21,7 +21,7 @@ An example of how to run basic CAP3 SLURM script on Crane is shown below:
 
 {{% panel header="`cap3.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=CAP3
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
diff --git a/content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/cd_hit.md b/content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/cd_hit.md
index 4fa711fc8dc746afa343a98d56ada2c21b43dc20..c905ce22478a645c1a19dc8a29db61acbc0faeaf 100644
--- a/content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/cd_hit.md
+++ b/content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/cd_hit.md
@@ -35,7 +35,7 @@ CD-HIT is multi-threaded program, and therefore, using multiple threads is recom
 Simple SLURM CD-HIT script for Crane with 8 CPUs is given in addition:
 {{% panel header="`cd-hit.submit`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=CD-HIT
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=8
diff --git a/content/applications/app_specific/dmtcp_checkpointing.md b/content/applications/app_specific/dmtcp_checkpointing.md
index 9de1c055fe6790b4d29caea3f04d16b112047bb2..19786d370a6a6e2894b22211d3ca135b0b0c0afd 100644
--- a/content/applications/app_specific/dmtcp_checkpointing.md
+++ b/content/applications/app_specific/dmtcp_checkpointing.md
@@ -66,7 +66,7 @@ on crane is shown below:
 
 {{% panel theme="info" header="dmtcp_blastx.submit" %}}
 {{< highlight batch >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=BlastX
 #SBATCH --nodes=1
 #SBATCH --ntasks=8
@@ -98,7 +98,7 @@ following submit file:
 
 {{% panel theme="info" header="dmtcp_restart_blastx.submit" %}}
 {{< highlight batch >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=BlastX
 #SBATCH --nodes=1
 #SBATCH --ntasks=8
diff --git a/content/applications/app_specific/fortran_c_on_hcc.md b/content/applications/app_specific/fortran_c_on_hcc.md
index 877bd5d0a1752023658c974029264a21a0acaee4..9a2c002f2b20852ef675d1dd8afc7dea0a19812e 100644
--- a/content/applications/app_specific/fortran_c_on_hcc.md
+++ b/content/applications/app_specific/fortran_c_on_hcc.md
@@ -123,7 +123,7 @@ line.
 
 {{% panel header="`submit_f.serial`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --mem-per-cpu=1024
 #SBATCH --time=00:01:00
 #SBATCH --job-name=Fortran
@@ -137,7 +137,7 @@ module load compiler/gcc/4.9
 
 {{% panel header="`submit_c.serial`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --mem-per-cpu=1024
 #SBATCH --time=00:01:00
 #SBATCH --job-name=C
diff --git a/content/applications/app_specific/mpi_jobs_on_hcc.md b/content/applications/app_specific/mpi_jobs_on_hcc.md
index 182012219d5f3e665d097ba3f366238114c06878..5d3792e58017172a33539d85c88fa786355f8a71 100644
--- a/content/applications/app_specific/mpi_jobs_on_hcc.md
+++ b/content/applications/app_specific/mpi_jobs_on_hcc.md
@@ -212,7 +212,7 @@ main program name.
 
 {{% panel header="`submit_f.mpi`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --ntasks=5
 #SBATCH --mem-per-cpu=1024
 #SBATCH --time=00:01:00
@@ -226,7 +226,7 @@ mpirun ./demo_f_mpi.x
 
 {{% panel header="`submit_c.mpi`"%}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --ntasks=5
 #SBATCH --mem-per-cpu=1024
 #SBATCH --time=00:01:00
diff --git a/content/applications/app_specific/running_gaussian_at_hcc.md b/content/applications/app_specific/running_gaussian_at_hcc.md
index 45aa43e6695ea8c60149a1b984a08f85a80508a6..a903fe15e87632a6619be8f30f7f0b17fb383aae 100644
--- a/content/applications/app_specific/running_gaussian_at_hcc.md
+++ b/content/applications/app_specific/running_gaussian_at_hcc.md
@@ -96,7 +96,7 @@ Content of Gaussian SLURM submission file `run-g09-general.slurm`:
 
 {{% panel theme="info" header="run-g09-general.slurm" %}}
 {{< highlight batch >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH -J g09
 #SBATCH --nodes=1 --ntasks-per-node=4
 #SBATCH --mem-per-cpu=2000
@@ -164,7 +164,7 @@ Submit your initial **g09** job with the following SLURM submission file:
 
 {{% panel theme="info" header="Submit with dmtcp" %}}
 {{< highlight batch >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH -J g09-dmtcp
 #SBATCH --nodes=1 --ntasks-per-node=16
 #SBATCH --mem-per-cpu=4000
@@ -214,7 +214,7 @@ resume your interrupted job:
 
 {{% panel theme="info" header="Resume with dmtcp" %}}
 {{< highlight batch >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH -J g09-restart
 #SBATCH --nodes=1 --ntasks-per-node=16
 #SBATCH --mem-per-cpu=4000
diff --git a/content/applications/app_specific/running_ocean_land_atmosphere_model_olam.md b/content/applications/app_specific/running_ocean_land_atmosphere_model_olam.md
index 2efd86d9809458b58df3a384884453df5273a0eb..67dee23d1502bde62ed21e90ee7f9a0b6d51eba9 100644
--- a/content/applications/app_specific/running_ocean_land_atmosphere_model_olam.md
+++ b/content/applications/app_specific/running_ocean_land_atmosphere_model_olam.md
@@ -206,7 +206,7 @@ USE_HDF5=1
 
 {{% panel theme="info" header="Sample submit script for PGI compiler" %}}
 {{< highlight batch >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --ntasks=8                  # 8 cores
 #SBATCH --mem-per-cpu=1024          # Minimum memory required per CPU (in megabytes)
 #SBATCH --time=03:15:00             # Run time in hh:mm:ss
@@ -223,7 +223,7 @@ mpirun /path/to/olam-4.2c-mpi
 
 {{% panel theme="info" header="Sample submit script for Intel compiler" %}}
 {{< highlight batch >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --ntasks=8                  # 8 cores
 #SBATCH --mem-per-cpu=1024          # Minimum memory required per CPU (in megabytes)
 #SBATCH --time=03:15:00             # Run time in hh:mm:ss
diff --git a/content/applications/user_software/using_singularity.md b/content/applications/user_software/using_singularity.md
index 801f3d628f03b107c4712fea0a6aa41f94d7347e..c5521bf213abf140493ed85681dd33eec25f2920 100644
--- a/content/applications/user_software/using_singularity.md
+++ b/content/applications/user_software/using_singularity.md
@@ -78,7 +78,7 @@ Using Singularity in a SLURM job is similar to how you would use any other softw
 
 {{% panel theme="info" header="Example Singularity SLURM script" %}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --time=03:15:00             # Run time in hh:mm:ss
 #SBATCH --mem-per-cpu=4096          # Maximum memory required per CPU (in megabytes)
 #SBATCH --job-name=singularity-test
@@ -201,7 +201,7 @@ For example,
 
 {{% panel theme="info" header="Example SLURM script" %}}
 {{< highlight bash >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --time=03:15:00             # Run time in hh:mm:ss
 #SBATCH --mem-per-cpu=4096          # Maximum memory required per CPU (in megabytes)
 #SBATCH --job-name=singularity-test
diff --git a/content/submitting_jobs/job_dependencies.md b/content/submitting_jobs/job_dependencies.md
index 36aa971e848093c509eed7076212f6448a338b33..b0dd6b7b32d7068e6f3bf03d1f4fc6d2eb3247c0 100644
--- a/content/submitting_jobs/job_dependencies.md
+++ b/content/submitting_jobs/job_dependencies.md
@@ -34,7 +34,7 @@ The SLURM submit files for each step are below.
 
 {{%expand "JobA.submit" %}}
 {{< highlight batch >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=JobA
 #SBATCH --time=00:05:00
 #SBATCH --ntasks=1
@@ -49,7 +49,7 @@ sleep 120
 
 {{%expand "JobB.submit" %}}
 {{< highlight batch >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=JobB
 #SBATCH --time=00:05:00
 #SBATCH --ntasks=1
@@ -66,7 +66,7 @@ sleep 120
 
 {{%expand "JobC.submit" %}}
 {{< highlight batch >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=JobC
 #SBATCH --time=00:05:00
 #SBATCH --ntasks=1
@@ -83,7 +83,7 @@ sleep 120
 
 {{%expand "JobC.submit" %}}
 {{< highlight batch >}}
-#!/bin/sh
+#!/bin/bash
 #SBATCH --job-name=JobD
 #SBATCH --time=00:05:00
 #SBATCH --ntasks=1