diff --git a/config.toml b/config.toml
index 1d8965824b7f6660ea1f4247b9c07cac29f929aa..5a883fd302fae7a247beb7d1c7a0df7aaac0f3b7 100644
--- a/config.toml
+++ b/config.toml
@@ -42,9 +42,5 @@ weight = 10
 
 # Hugo v0.60+ switched renderer from blackfriday to goldmark
 # - Allow unsafe for docdock template rendering
-#[markup.goldmark.renderer]
-#unsafe = true
-
-# FIXME: Use blackfriday renderer until we update the content to render with goldmark
-[markup]
-defaultMarkdownHandler = "blackfriday"
+[markup.goldmark.renderer]
+unsafe = true
diff --git a/content/Events/2015/hcc_fall_kickstart_2015.md b/content/Events/2015/hcc_fall_kickstart_2015.md
index 713201f47274f906e9ed1a7b1cee27268a1a944a..eef8b2e46c7b10debe7462e1b063c7a634167a68 100644
--- a/content/Events/2015/hcc_fall_kickstart_2015.md
+++ b/content/Events/2015/hcc_fall_kickstart_2015.md
@@ -15,7 +15,7 @@ Materials
 Software Carpentry
 Lessons: <a href="http://eharstad.github.io/2015-09-08-UNL/" class="external-link">http://eharstad.github.io/2015-09-08-UNL/</a>
 
-[Slides for Day 1] (https://unl.box.com/s/3tz0e3tbxzx9wt8s5l65e1w3fm1w4ojx) (Software Carpentry Introduction)
+[Slides for Day 1](https://unl.box.com/s/3tz0e3tbxzx9wt8s5l65e1w3fm1w4ojx) (Software Carpentry Introduction)
 
 [Slides for Day 2](https://unl.box.com/s/8ckxlt7f9geiphiomjodesx9a2nw664b) (Morning Session)
 
diff --git a/content/FAQ/_index.md b/content/FAQ/_index.md
index a130cf58ab3648ec387f8128b0711cb401f86973..e653fe1f04f726c8b2e62af7e6cdd477d8d2fa2e 100644
--- a/content/FAQ/_index.md
+++ b/content/FAQ/_index.md
@@ -57,7 +57,7 @@ they were at the time of our last backup. Please note that any changes
 made to the files between when the backup was made and when you deleted
 them will not be preserved. To have these files restored, please contact
 HCC Support at
-{{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
+{{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu)
 as soon as possible.
 
 **If the files were in your $WORK directory (/work/group/user/):** No.
@@ -78,7 +78,7 @@ Please stop by
 [our offices](http://hcc.unl.edu/location)
 along with a photo ID and we will be happy to activate it for you. If
 you are not local to Omaha or Lincoln, contact us at
-{{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
+{{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu)
 and we will help you activate Duo remotely.
 
 **If you have activated Duo previously but now have a different phone
@@ -90,7 +90,7 @@ Duo and update your account with your new phone number.
 **If you have activated Duo previously and have the same phone number:**
 
 Email us at
-{{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
+{{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu)
 from the email address your account is registered under and we will send
 you a new link that you can use to activate Duo.
 
@@ -132,7 +132,7 @@ is produced.
 **If you are running from inside your $WORK directory:**
 
 Contact us at
-{{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
+{{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu)
 with your login, the name of the cluster you are running on, and the
 full path to your submit script and we will be happy to help solve the
 issue.
@@ -160,7 +160,7 @@ For additional details on how to monitor usage on jobs, check out the
 documentation on [Monitoring Jobs]({{< relref "monitoring_jobs" >}}).
 
 If you continue to run into issues, please contact us at
-{{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
+{{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu)
 for additional assistance.
 
 #### I want to talk to a human about my problem. Can I do that?
diff --git a/content/_footer.md b/content/_footer.md
index 7a9bc1efc01e46bf174e80b56ac4a403cc3f0e9f..19f57989e9168d5a51b8a2facc1818ed09a4fd53 100644
--- a/content/_footer.md
+++ b/content/_footer.md
@@ -2,6 +2,6 @@
   title = "Footer"
 +++
 
-{{< icon name="copyright-mark" >}} [Holland Computing Center] (https://hcc.unl.edu) | 118 Schorr Center, Lincoln NE 68588 | {{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu) | {{< icon name="phone-alt" >}}402-472-5041
+{{< icon name="copyright-mark" >}} [Holland Computing Center](https://hcc.unl.edu) | 118 Schorr Center, Lincoln NE 68588 | {{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu) | {{< icon name="phone-alt" >}}402-472-5041
 
 See something wrong?  Help us fix it by [contributing](https://git.unl.edu/hcc/hcc-docs/blob/master/CONTRIBUTING.md)!
diff --git a/content/accounts/setting_up_and_using_duo.md b/content/accounts/setting_up_and_using_duo.md
index 723c840fb3300c2cb21e2f5521f8ddb01d4ce923..25d013ccef7327a74ad3a7feccd6db0f8b07ef8d 100644
--- a/content/accounts/setting_up_and_using_duo.md
+++ b/content/accounts/setting_up_and_using_duo.md
@@ -35,13 +35,13 @@ If you *are not* currently using Duo with your TrueYou account:
     Faculty/staff members with a verified NU telephone number can enroll by
     phone. If you would like an HCC staff member to call your NU telephone
     number to enroll, please email
-    {{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
+    {{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu)
     with a time you will be available.
 
 If you *are* currently using Duo with your TrueYou account:
 
 1.  You can request to use the same phone for HCC's Duo as you are using for TrueYou.
-    Please contact [hcc-support@unl.edu] (mailto:hcc-support@unl.edu) with the request
+    Please contact [hcc-support@unl.edu](mailto:hcc-support@unl.edu) with the request
     using the email address associated with your TrueYou account. In the email, include
     the last 4 digits of the phone number for verification.
 
@@ -72,7 +72,7 @@ exactly as before.
 
 After 10 failed authentication attempts, the user's account is
 disabled. If this is the case, then the user needs to send an email to
-[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
+[hcc-support@unl.edu](mailto:hcc-support@unl.edu)
 including his/her username and the reason why multiple failed
 authentication attempts occurred.  
 {{% /notice %}}
@@ -92,7 +92,7 @@ Simply tap `Approve` to verify the login.
 
 {{% notice warning%}}**If you receive a verification request you didn't initiate, deny the 
 request and contact HCC immediately via email at
-[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)**
+[hcc-support@unl.edu](mailto:hcc-support@unl.edu)**
 {{% /notice %}}
 
 In the terminal, the login will now complete and the user will logged in
diff --git a/content/anvil/_index.md b/content/anvil/_index.md
index bd4fc812509305d14b13d06f1f5c1b8fbb1b0e87..f0ba40a5adc71af78d4b7e5fac7d81ddce6a8593 100644
--- a/content/anvil/_index.md
+++ b/content/anvil/_index.md
@@ -173,7 +173,7 @@ precious or irreproducible data should not be placed or left on Anvil**.
     subscription is required.  As part of HCC's Globus Provider Plan,
     HCC can provide this on a per-user basis free of charge.  If you are
     interested in Globus Plus, please email
-    {{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
+    {{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu)
     with your request and a brief explanation.
 
 ## Backups
@@ -192,6 +192,6 @@ disaster recovery backups should not be the only source of backups for
 important data. The backup policies are subject to change without prior
 notice. To retrieve your backups, please contact HCC. If you have
 special concerns please contact us at
-{{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu).
+{{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu).
 
 
diff --git a/content/anvil/available_images.md b/content/anvil/available_images.md
index 66e47a2b40519ba476845d1da44249839d7d92dc..6f4493dda2f90b522c5fa3bb82add56ddf460d82 100644
--- a/content/anvil/available_images.md
+++ b/content/anvil/available_images.md
@@ -13,5 +13,5 @@ list of available images.
 {{< /sorttable >}}
 
 Additional images can be produced by HCC staff by request at
-{{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu).
+{{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu).
  
diff --git a/content/anvil/resizing_an_instance.md b/content/anvil/resizing_an_instance.md
index fab023572069f258d449adf1509ea23946f4e05e..a0461e1fda5ecbe458bd288751995ee9a8a10d48 100644
--- a/content/anvil/resizing_an_instance.md
+++ b/content/anvil/resizing_an_instance.md
@@ -63,4 +63,4 @@ At this point, your instance should have a status of *Active*. Connect
 to your instance and ensure that everything works as expected.
 
 {{% notice warning %}}
-If your instance ends in an "Error" state or you have any issues or questions with resizing, please contact us at [hcc-support@unl.edu] (mailto:hcc-support@unl.edu) for assistance.{{% /notice %}}
+If your instance ends in an "Error" state or you have any issues or questions with resizing, please contact us at [hcc-support@unl.edu](mailto:hcc-support@unl.edu) for assistance.{{% /notice %}}
diff --git a/content/anvil/what_are_the_per_group_resource_limits.md b/content/anvil/what_are_the_per_group_resource_limits.md
index 4a44f23b3952e14e6ccc551073494a5c42833614..a2cc67fca1c0f067089c680f13a279cd1d7468aa 100644
--- a/content/anvil/what_are_the_per_group_resource_limits.md
+++ b/content/anvil/what_are_the_per_group_resource_limits.md
@@ -26,6 +26,6 @@ resources, that can be accommodated on a fee basis, depending on the
 amount needed.  Please see the HCC
 [Priority Access Pricing](http://hcc.unl.edu/priority-access-pricing) page for specific costs.
 *By default, no public IP addresses are provided.*
-Please contact {{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
+Please contact {{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu)
 for more details.
 
diff --git a/content/applications/app_specific/Jupyter.md b/content/applications/app_specific/Jupyter.md
index d738c1c94bc4a7a3a1e174957541b412f57aec2a..135e3e82c7d23c668748f820c0bbe7b939213375 100644
--- a/content/applications/app_specific/Jupyter.md
+++ b/content/applications/app_specific/Jupyter.md
@@ -4,10 +4,10 @@ description = "How to access and use a Jupyter Notebook"
 weight = 20
 +++
 
-- [Connecting to JupyterHub] (#connecting-to-jupyterhub)
-- [Running Code] (#running-code)
-- [Opening a Terminal] (#opening-a-terminal)
-- [Using Custom Packages] (#using-custom-packages)
+- [Connecting to JupyterHub](#connecting-to-jupyterhub)
+- [Running Code](#running-code)
+- [Opening a Terminal](#opening-a-terminal)
+- [Using Custom Packages](#using-custom-packages)
 
 ## Connecting to JupyterHub
 -----------------------
@@ -42,7 +42,7 @@ Select the other options based on your computing needs. Note that a SLURM Job wi
 
 1.	From your user home page, select "terminal" from the "New" drop-down menu.
 {{< figure src="/images/jupyterTerminal.png">}}
-2.	A terminal opens in a new tab. You can enter [Linux commands] ({{< relref "basic_linux_commands" >}})
+2.	A terminal opens in a new tab. You can enter [Linux commands]({{< relref "basic_linux_commands" >}})
  at the prompt.
 {{< figure src="/images/jupyterTerminal2.png">}}
 
diff --git a/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/_index.md b/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/_index.md
index aa90760502ae33aad065c3fd6c969ab593bf9bd2..c3cb52f9a69e929c2fffca2a769940d85382ed7a 100644
--- a/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/_index.md
+++ b/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/_index.md
@@ -46,7 +46,7 @@ The basic usage of ``perf-report` is:
 {{< highlight bash >}}
 perf-report [OPTION...] PROGRAM [PROGRAM_ARGS]
 or
-perf-report [OPTION...] (mpirun|mpiexec|aprun|...) [MPI_ARGS] PROGRAM [PROGRAM_ARGS]
+perf-report [OPTION...] (mpirun|mpiexec|aprun|...) [MPI_ARGS] PROGRAM [PROGRAM_ARGS]
 {{< /highlight >}}
 {{% /panel %}}
 
diff --git a/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/_index.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/_index.md
index 3fbde63b1ca76c9f80c6c17ded9cb85aaf1d38e9..c37870f7b0a0800706b259d65a23c521537f9320 100644
--- a/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/_index.md
+++ b/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/_index.md
@@ -5,7 +5,7 @@ weight = "52"
 +++
 
 
-[BLAST] (https://blast.ncbi.nlm.nih.gov/Blast.cgi) is a local alignment tool that finds similarity between sequences. This tool compares nucleotide or protein sequences to sequence databases, and calculates significance of matches. Sometimes these input sequences are large and using the command-line BLAST is required.
+[BLAST](https://blast.ncbi.nlm.nih.gov/Blast.cgi) is a local alignment tool that finds similarity between sequences. This tool compares nucleotide or protein sequences to sequence databases, and calculates significance of matches. Sometimes these input sequences are large and using the command-line BLAST is required.
 
 The following pages, [Create Local BLAST Database]({{<relref "create_local_blast_database" >}}) and [Running BLAST Alignment]({{<relref "running_blast_alignment" >}}) describe how to run some of the most common BLAST executables as a single job using the SLURM scheduler on HCC.
 
diff --git a/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/running_blast_alignment.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/running_blast_alignment.md
index 936c2ebbd95c39a0c72b35fff7b66a62bc30b265..e4de1150c6971f8e228cd26251c8d2fd2e93f16e 100644
--- a/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/running_blast_alignment.md
+++ b/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/running_blast_alignment.md
@@ -20,7 +20,7 @@ $ blastn -query input_reads.fasta -db input_reads_db -out blastn_output.alignmen
 {{< /highlight >}}
 where **input_reads.fasta** is an input file of sequence data in fasta format, **input_reads_db** is the generated BLAST database, and **blastn_output.alignments** is the output file where the alignments are stored.
 
-Additional parameters can be found in the [BLAST manual] (https://www.ncbi.nlm.nih.gov/books/NBK279690/), or by typing:
+Additional parameters can be found in the [BLAST manual](https://www.ncbi.nlm.nih.gov/books/NBK279690/), or by typing:
 {{< highlight bash >}}
 $ blastn -help
 {{< /highlight >}}
@@ -28,7 +28,7 @@ $ blastn -help
 These BLAST alignment commands are multi-threaded, and therefore using the BLAST option **-num_threads <number_of_CPUs>** is recommended.
 
 
-HCC hosts multiple BLAST databases and indices on Crane. In order to use these resources, the ["biodata" module] ({{<relref "/applications/app_specific/bioinformatics_tools/biodata_module">}}) needs to be loaded first. The **$BLAST** variable contains the following currently available databases:
+HCC hosts multiple BLAST databases and indices on Crane. In order to use these resources, the ["biodata" module]({{<relref "/applications/app_specific/bioinformatics_tools/biodata_module">}}) needs to be loaded first. The **$BLAST** variable contains the following currently available databases:
 
 - **16SMicrobial**
 - **nr**
@@ -37,7 +37,7 @@ HCC hosts multiple BLAST databases and indices on Crane. In order to use these r
 - **refseq_rna**
 - **swissprot**
 
-If you want to create and use a BLAST database that is not mentioned above, check [Create Local BLAST Database]({{<relref "create_local_blast_database" >}}). If you want a database to be added to the ["biodata" module] ({{<relref "/applications/app_specific/bioinformatics_tools/biodata_module">}}), please send a request to bcrf-support@unl.edu.
+If you want to create and use a BLAST database that is not mentioned above, check [Create Local BLAST Database]({{<relref "create_local_blast_database" >}}). If you want a database to be added to the ["biodata" module]({{<relref "/applications/app_specific/bioinformatics_tools/biodata_module">}}), please send a request to bcrf-support@unl.edu.
 
 {{% notice info %}}
 **To access the older format of BLAST databases that work with BLAST+ 2.9 and lower, please use the variable BLAST_V4.**
@@ -81,7 +81,7 @@ $ blastn -query input_reads.fasta -db input_reads_db -out blastn_output.alignmen
 {{< /highlight >}}
 
 
-The default BLAST output is in pairwise format. However, BLAST’s parameter **-outfmt** supports output in [different formats] (https://www.ncbi.nlm.nih.gov/books/NBK279684/) that are easier for parsing.
+The default BLAST output is in pairwise format. However, BLAST’s parameter **-outfmt** supports output in [different formats](https://www.ncbi.nlm.nih.gov/books/NBK279684/) that are easier for parsing.
 
 
 Basic SLURM example of protein BLAST run against the non-redundant **nr **BLAST database with tabular output format and `8 CPUs` is shown below. Similarly as before, the query and database files are copied to the **/scratch/** directory. The BLAST output is also saved in this directory (**/scratch/blastx_output.alignments**). After BLAST finishes, the output file is copied from the worker node to your current work directory.
diff --git a/content/applications/app_specific/bioinformatics_tools/alignment_tools/blat.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/blat.md
index 5f2c3fb4bb819ac548711b715d28921a01328580..9113bca24e2af8b1d905f63d87835b1ff65d4ac5 100644
--- a/content/applications/app_specific/bioinformatics_tools/alignment_tools/blat.md
+++ b/content/applications/app_specific/bioinformatics_tools/alignment_tools/blat.md
@@ -15,7 +15,7 @@ $ blat database query output_alignment.txt [options]
 where **database** is the name of the database used for the alignment, **query** is the name of the input file of sequence data in `fasta/nib/2bit` format, and **output_alignment.txt** is the output alignment file.
 
 
-Additional parameters for BLAT alignment can be found in the [manual] (http://genome.ucsc.edu/FAQ/FAQblat), or by using:
+Additional parameters for BLAT alignment can be found in the [manual](http://genome.ucsc.edu/FAQ/FAQblat), or by using:
 {{< highlight bash >}}
 $ blat
 {{< /highlight >}}
diff --git a/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie.md
index 7a0670a6d438724d1dfa38f0c8a44ff26cc1c40e..41aaf03dbb2c4177d1e9e0fbbf471b88fe9345e2 100644
--- a/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie.md
+++ b/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie.md
@@ -5,7 +5,7 @@ weight = "10"
 +++
 
 
-[Bowtie] (http://bowtie-bio.sourceforge.net/index.shtml) is an ultrafast and memory-efficient aligner for large sets of sequencing reads to a reference genome. Bowtie indexes the genome with a Burrows-Wheeler index to keep its memory footprint small. Bowtie also supports usage of multiple processors to achieve greater alignment speed.
+[Bowtie](http://bowtie-bio.sourceforge.net/index.shtml) is an ultrafast and memory-efficient tool for aligning large sets of sequencing reads to a reference genome. Bowtie indexes the genome with a Burrows-Wheeler index to keep its memory footprint small. Bowtie also supports the use of multiple processors to achieve greater alignment speed.
 
 
 The first and basic step of running Bowtie is to build and format an index from the reference genome. The basic usage of this command, **bowtie-build** is:
@@ -19,7 +19,7 @@ After the index of the reference genome is generated, the next step is to align
 $ bowtie [-q|-f|-r|-c] index_prefix [-1 input_reads_pair_1.[fasta|fastq] -2 input_reads_pair_2.[fasta|fastq] | input_reads.[fasta|fastq]] [options]
 {{< /highlight >}}
 where **index_prefix** is the generated index using the **bowtie-build** command, and **options** are optional parameters that can be found in the [Bowtie
-manual] (http://bowtie-bio.sourceforge.net/manual.shtml).
+manual](http://bowtie-bio.sourceforge.net/manual.shtml).
 
 
 Bowtie supports both single-end (`input_reads.[fasta|fastq]`) and paired-end (`input_reads_pair_1.[fasta|fastq]`, `input_reads_pair_2.[fasta|fastq]`) files in fasta or fastq format. The format of the input files also needs to be specified by using the following flags: **-q** (fastq files), **-f** (fasta files), **-r** (raw one-sequence per line), or **-c** (sequences given on command line).
diff --git a/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie2.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie2.md
index 2fcb2817b484dbf562e4f2f2321b54ea69af9414..8d091433c32099f6cd7e05525f9f4b722adb7bae 100644
--- a/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie2.md
+++ b/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie2.md
@@ -5,7 +5,7 @@ weight = "10"
 +++
 
 
-[Bowtie2] (http://bowtie-bio.sourceforge.net/bowtie2/index.shtml) is an ultrafast and memory-efficient tool for aligning sequencing reads to long reference sequences. Although Bowtie and Bowtie2 are both fast read aligners, there are few main differences between them:
+[Bowtie2](http://bowtie-bio.sourceforge.net/bowtie2/index.shtml) is an ultrafast and memory-efficient tool for aligning sequencing reads to long reference sequences. Although Bowtie and Bowtie2 are both fast read aligners, there are a few main differences between them:
 
 - Bowtie2 supports gapped alignment with affine gap penalties, without restrictions on the number of gaps and gap lengths.
 - Bowtie supports reads longer than 50bp and is generally faster, more sensitive, and uses less memory than Bowtie.
@@ -28,7 +28,7 @@ The command **bowtie2** takes a Bowtie2 index and set of sequencing read files a
 {{< highlight bash >}}
 $ bowtie2 -x index_prefix [-q|--qseq|-f|-r|-c] [-1 input_reads_pair_1.[fasta|fastq] -2 input_reads_pair_2.[fasta|fastq] | -U input_reads.[fasta|fastq]] -S bowtie2_alignments.sam [options]
 {{< /highlight >}}
-where **index_prefix** is the generated index using the **bowtie2-build** command, and **options** are optional parameters that can be found in the [Bowtie2 manual] (http://bowtie-bio.sourceforge.net/bowtie2/manual.shtml). Bowtie2 supports both single-end (`input_reads.[fasta|fastq]`) and paired-end (`input_reads_pair_1.[fasta|fastq]`, `input_reads_pair_2.[fasta|fastq]`) files in fasta or fastq format. The format of the input files also needs to be specified by using one of the following flags: **-q** (fastq files), **--qseq** (Illumina's qseq format), **-f** (fasta files), **-r** (raw one sequence per line), or **-c** (sequences given on command line).
+where **index_prefix** is the generated index using the **bowtie2-build** command, and **options** are optional parameters that can be found in the [Bowtie2 manual](http://bowtie-bio.sourceforge.net/bowtie2/manual.shtml). Bowtie2 supports both single-end (`input_reads.[fasta|fastq]`) and paired-end (`input_reads_pair_1.[fasta|fastq]`, `input_reads_pair_2.[fasta|fastq]`) files in fasta or fastq format. The format of the input files also needs to be specified by using one of the following flags: **-q** (fastq files), **--qseq** (Illumina's qseq format), **-f** (fasta files), **-r** (raw one sequence per line), or **-c** (sequences given on command line).
 
 
 An example of how to run Bowtie2 local alignment on Crane with paired-end fasta files and `8 CPUs` is shown below:
diff --git a/content/applications/app_specific/bioinformatics_tools/alignment_tools/bwa/_index.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/bwa/_index.md
index 5b770061ec93f6a78c6ec0a37dfd86ea2306fa08..0b35ec8d37a6c0b7a9f99849359e970add4acb71 100644
--- a/content/applications/app_specific/bioinformatics_tools/alignment_tools/bwa/_index.md
+++ b/content/applications/app_specific/bioinformatics_tools/alignment_tools/bwa/_index.md
@@ -33,7 +33,7 @@ For detailed description and more information on a specific command, just type:
 {{< highlight bash >}}
 $  bwa COMMAND
 {{< /highlight >}}
-or check the [BWA manual] (http://bio-bwa.sourceforge.net/bwa.shtml).
+or check the [BWA manual](http://bio-bwa.sourceforge.net/bwa.shtml).
 
 
 The page [Running BWA Commands]({{<relref "running_bwa_commands" >}}) shows how to run BWA on HCC.
diff --git a/content/applications/app_specific/bioinformatics_tools/alignment_tools/clustal_omega.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/clustal_omega.md
index 94fbd172aa458b6ced73f7927d1c42d5eba58c1a..6aeebfae3f8913f7cb4f333b94701bad3eca6a41 100644
--- a/content/applications/app_specific/bioinformatics_tools/alignment_tools/clustal_omega.md
+++ b/content/applications/app_specific/bioinformatics_tools/alignment_tools/clustal_omega.md
@@ -5,7 +5,7 @@ weight = "10"
 +++
 
 
-[Clustal Omega] (http://www.clustal.org/omega/) is a general purpose multiple sequence alignment (MSA) tool used mainly with protein, as well as DNA and RNA sequences. Clustal Omega is fast and scalable aligner that can align datasets of hundreds of thousands of sequences in reasonable time.
+[Clustal Omega](http://www.clustal.org/omega/) is a general purpose multiple sequence alignment (MSA) tool used mainly with protein, as well as DNA and RNA sequences. Clustal Omega is a fast and scalable aligner that can align datasets of hundreds of thousands of sequences in reasonable time.
 
 
 The general usage of Clustal Omega is:
diff --git a/content/applications/app_specific/bioinformatics_tools/alignment_tools/tophat_tophat2.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/tophat_tophat2.md
index 057674311f3ed98ed1e4f5b758dbebe03b2b82e1..b80475689224d4b58f329dd4215c66f04b5d8f39 100644
--- a/content/applications/app_specific/bioinformatics_tools/alignment_tools/tophat_tophat2.md
+++ b/content/applications/app_specific/bioinformatics_tools/alignment_tools/tophat_tophat2.md
@@ -5,7 +5,7 @@ weight = "10"
 +++
 
 
-[TopHat] (https://ccb.jhu.edu/software/tophat/index.shtml) is a fast splice junction mapper for RNA-Seq data. It first aligns RNA-Seq reads to reference genomes using the ultra high-throughput short read aligner Bowtie, and then analyzes the mapping results to identify splice junctions between exons. 
+[TopHat](https://ccb.jhu.edu/software/tophat/index.shtml) is a fast splice junction mapper for RNA-Seq data. It first aligns RNA-Seq reads to reference genomes using the ultra high-throughput short read aligner Bowtie, and then analyzes the mapping results to identify splice junctions between exons. 
 
 Although there is no difference between the available options for both TopHat and TopHat2 and the number of output files, TopHat2 incorporates many significant improvements to TopHat. The TopHat package at HCC supports both **tophat** and **tophat2**.
 
@@ -19,7 +19,7 @@ where **index_prefix** is the basename of the genome index to be searched. This
 TopHat2 uses single or comma-separated list of paired-end and single-end reads in fasta or fastq format. The single-end reads need to be provided after the paired-end reads.
 
 
-More advanced TopHat2 options can be found in [its manual] (https://ccb.jhu.edu/software/tophat/manual.shtml), or by typing:
+More advanced TopHat2 options can be found in [its manual](https://ccb.jhu.edu/software/tophat/manual.shtml), or by typing:
 {{< highlight bash >}}
 $ tophat2 -h
 {{< /highlight >}}
diff --git a/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/samtools/_index.md b/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/samtools/_index.md
index df9a00e249745424df56d468de309da6f4971c81..a3887e182d6481481568199894b498566740a4b9 100644
--- a/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/samtools/_index.md
+++ b/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/samtools/_index.md
@@ -5,7 +5,7 @@ weight = "52"
 +++
 
 
-The SAM format is a standard format for storing large nucleotide sequence alignments. The BAM format is just the binary form from SAM. [SAMtools] (http://www.htslib.org/) is a toolkit for manipulating alignments in SAM/BAM format, including sorting, merging, indexing and generating alignments in a per-position format.
+The SAM format is a standard format for storing large nucleotide sequence alignments. The BAM format is the binary form of SAM. [SAMtools](http://www.htslib.org/) is a toolkit for manipulating alignments in SAM/BAM format, including sorting, merging, indexing and generating alignments in a per-position format.
 
 
 The basic usage of SAMtools is:
@@ -39,7 +39,7 @@ For detailed description and more information on a specific command, just type:
 {{< highlight bash >}}
 $ samtools COMMAND
 {{< /highlight >}}
-or check the [SAMtools manual] (http://www.htslib.org/doc/samtools.html).
+or check the [SAMtools manual](http://www.htslib.org/doc/samtools.html).
 
 
 The page [Running SAMtools Commands]({{<relref "running_samtools_commands" >}}) shows how to run SAMtools on HCC.
diff --git a/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/sratoolkit.md b/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/sratoolkit.md
index 3cfaa501ed2314eb20c33f014b120bb576b528f4..d2704b9f8f319a03e749e482c0a137dcd6edbe6d 100644
--- a/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/sratoolkit.md
+++ b/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/sratoolkit.md
@@ -5,7 +5,7 @@ weight = "10"
 +++
 
 
-[SRA (Sequence Read Archive)] (http://www.ncbi.nlm.nih.gov/sra) is an NCBI-defined format for NGS data. Every data submitted to NCBI needs to be in SRA format. The SRA Toolkit provides tools for converting different formats of data into SRA format, and vice versa, extracting SRA data in other different formats.
+[SRA (Sequence Read Archive)](http://www.ncbi.nlm.nih.gov/sra) is an NCBI-defined format for NGS data. All data submitted to NCBI needs to be in SRA format. The SRA Toolkit provides tools for converting data from different formats into the SRA format and, vice versa, for extracting SRA data into other formats.
 
 The SRA Toolkit allows converting data from the SRA format to the following formats: `ABI SOLiD native`, `fasta`, `fastq`, `sff`, `sam`, and `Illumina native`. Also, the SRA Toolkit allows converting data from `fasta`, `fastq`, `AB SOLiD-SRF`, `AB SOLiD-native`, `Illumina SRF`, `Illumina native`, `sff`, and `bam` format into the SRA format.
 
diff --git a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/oases.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/oases.md
index 7f29d4a32a43422a08d435f31b3d51ebb95d84f0..82278e8858fb1b26ea367756a8e1e98ab062e929 100644
--- a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/oases.md
+++ b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/oases.md
@@ -5,7 +5,7 @@ weight = "10"
 +++
 
 
-Velvet by itself generates assembled contigs for DNA data. However, using the Oases extension for Velvet, a transcriptome assembly can be produced. [Oases] (https://www.ebi.ac.uk/~zerbino/oases/) is an extension of Velvet for generating de novo assembly for RNA-Seq data. Oases uses the preliminary assembly produced by Velvet as an input, and constructs transcripts.
+Velvet by itself generates assembled contigs for DNA data. However, using the Oases extension for Velvet, a transcriptome assembly can be produced. [Oases](https://www.ebi.ac.uk/~zerbino/oases/) is an extension of Velvet for generating de novo assembly for RNA-Seq data. Oases uses the preliminary assembly produced by Velvet as an input, and constructs transcripts.
 
 
 In order to be able to run Oases, after `velveth`, `velvetg` needs to be run with the `–read_trkg yes` option:
@@ -22,7 +22,7 @@ contigs.fa  Graph2  LastGraph  Log  PreGraph  Roadmaps  Sequences  stats.txt
 {{% /panel %}}
 
 
-Oases has a lot of parameters that can be found in its [manual] (https://www.ebi.ac.uk/~zerbino/oases/OasesManual.pdf). While Velvet is multi-threaded, Oases is not.
+Oases has a lot of parameters that can be found in its [manual](https://www.ebi.ac.uk/~zerbino/oases/OasesManual.pdf). While Velvet is multi-threaded, Oases is not.
 
 
 A simple SLURM script to run Oases on the Velvet output stored in `output_directory/` with minimum transcript length of `200` is shown below:
diff --git a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/ray.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/ray.md
index 795d16233e8487cd19b9834185433a55884eb13d..7e57db1f1d964c035c1cfc2cca6879170bf42cde 100644
--- a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/ray.md
+++ b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/ray.md
@@ -5,7 +5,7 @@ weight = "10"
 +++
 
 
-[Ray] (http://denovoassembler.sourceforge.net/) is a de novo de Bruijn genome assembler that works with next-generation sequencing data (Illumina, 454, SOLiD). Ray is scalable and parallel software that takes advantage of multiple nodes and multiple CPUs using MPI (message passing interface).
+[Ray](http://denovoassembler.sourceforge.net/) is a de novo de Bruijn genome assembler that works with next-generation sequencing data (Illumina, 454, SOLiD). Ray is scalable and parallel software that takes advantage of multiple nodes and multiple CPUs using MPI (message passing interface).
 
 
 Ray can be used for building multiple applications:
diff --git a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/soapdenovo2.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/soapdenovo2.md
index 30b83f5686d6baff087cd1d0e544b2a717e2e5b2..d4248506b850803365f7207921a62ed533ddd7da 100644
--- a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/soapdenovo2.md
+++ b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/soapdenovo2.md
@@ -5,7 +5,7 @@ weight = "10"
 +++
 
 
-[SOAPdenovo] (http://soap.genomics.org.cn/soapdenovo.html) is a de novo genome assembler for short reads. It is specially designed for Illumina GA short reads and large plant and animal genomes. SOAPdenovo2 is a newer version of SOAPdenovo with improved algorithm that reduces memory consumption, resolves more repeat regions, increases coverage, and optimizes the assembly for large genomes.
+[SOAPdenovo](http://soap.genomics.org.cn/soapdenovo.html) is a de novo genome assembler for short reads. It is specially designed for Illumina GA short reads and large plant and animal genomes. SOAPdenovo2 is a newer version of SOAPdenovo with an improved algorithm that reduces memory consumption, resolves more repeat regions, increases coverage, and optimizes the assembly for large genomes.
 
 SOAPdenovo2 has two commands, **SOAPdenovo-63mer** and **SOAPdenovo-127mer**. The first one is suitable for assembly with k-mer values less than 63 bp, requires less memory and runs faster. The latter one works for k-mer values less than 127 bp.
 
diff --git a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/trinity/_index.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/trinity/_index.md
index 3147c63996f152d32baec4b1cb6c876a60df5472..09067c00dbb06cb20d2d4629057cc7829c25a99c 100644
--- a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/trinity/_index.md
+++ b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/trinity/_index.md
@@ -5,7 +5,7 @@ weight = "52"
 +++
 
 
-[Trinity] (https://github.com/trinityrnaseq/trinityrnaseq/wiki) is a method for efficient and robust de novo reconstruction of transcriptomes from RNA-Seq data. Trinity combines three independent software modules: `Inchworm`, `Chrysalis`, and `Butterfly`. All these modules can be applied sequentially to process large RNA-Seq datasets.
+[Trinity](https://github.com/trinityrnaseq/trinityrnaseq/wiki) is a method for efficient and robust de novo reconstruction of transcriptomes from RNA-Seq data. Trinity combines three independent software modules: `Inchworm`, `Chrysalis`, and `Butterfly`. All these modules can be applied sequentially to process large RNA-Seq datasets.
 
 
 The basic usage of Trinity is:
diff --git a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/_index.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/_index.md
index 427424d769b0a9e7e609fd22a05d7088793b0053..0c14aea5a53b985580bd2eeecf2da053cd61497e 100644
--- a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/_index.md
+++ b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/_index.md
@@ -5,9 +5,9 @@ weight = "52"
 +++
 
 
-[Velvet] (https://www.ebi.ac.uk/~zerbino/velvet/) is a general sequence assembler designed to produce assembly from short, as well as long reads. Running Velvet consists of a sequence of two commands **velveth** and **velvetg**. **velveth** produces a hash table of k-mers, while **velvetg** constructs the genome assembly. The k-mer length, also known as hash length corresponds to the length, in base pairs, of the words of the reads being hashed.
+[Velvet](https://www.ebi.ac.uk/~zerbino/velvet/) is a general sequence assembler designed to produce assemblies from both short and long reads. Running Velvet consists of a sequence of two commands, **velveth** and **velvetg**. **velveth** produces a hash table of k-mers, while **velvetg** constructs the genome assembly. The k-mer length, also known as the hash length, corresponds to the length, in base pairs, of the words of the reads being hashed.
 
-Velvet has lots of parameters that can be found in its [manual] (https://www.ebi.ac.uk/~zerbino/velvet/Manual.pdf). However, the k-mer value is crucial in obtaining optimal assemblies. Higher k-mer values increase the specificity, and lower k-mer values increase the sensitivity.
+Velvet has lots of parameters that can be found in its [manual](https://www.ebi.ac.uk/~zerbino/velvet/Manual.pdf). However, the k-mer value is crucial in obtaining optimal assemblies. Higher k-mer values increase the specificity, and lower k-mer values increase the sensitivity.
 
 Velvet supports multiple file formats: `fasta`, `fastq`, `fasta.gz`, `fastq.gz`, `sam`, `bam`, `eland`, `gerald`. Velvet also supports different read categories for different sequencing technologies and libraries, e.g. `short`, `shortPaired`, `short2`, `shortPaired2`, `long`, `longPaired`.
 
diff --git a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/cutadapt.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/cutadapt.md
index 779a7e641e16639b939304612e937c10ee727292..6f8770f0b8ab7d2323166d5cb172333ef3d6465b 100644
--- a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/cutadapt.md
+++ b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/cutadapt.md
@@ -5,7 +5,7 @@ weight = "10"
 +++
 
 
-[Cutadapt] (https://cutadapt.readthedocs.io/en/stable/index.html) is a tool for removing adapter sequences from DNA sequencing data. Although most of the adapters are located at the 3' end of the sequencing read, Cutadapt allows multiple adapter removal from both 3' and 5' ends.
+[Cutadapt](https://cutadapt.readthedocs.io/en/stable/index.html) is a tool for removing adapter sequences from DNA sequencing data. Although most of the adapters are located at the 3' end of the sequencing read, Cutadapt allows multiple adapter removal from both 3' and 5' ends.
 
 
 The basic usage of Cutadapt is:
diff --git a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/prinseq.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/prinseq.md
index 26a236f42357daa1816829fa14e5d7fc264f8e80..09d2fd44d21f6486c08993823090fe8c904eedb3 100644
--- a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/prinseq.md
+++ b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/prinseq.md
@@ -5,7 +5,7 @@ weight = "10"
 +++
 
 
-[PRINSEQ (PReprocessing and INformation of SEQuence data)] (http://prinseq.sourceforge.net/) is a tool used for filtering, formatting or trimming genome and metagenomic sequence data in fasta/fastq format. Moreover, PRINSEQ generates summary statistics of sequence and quality data.
+[PRINSEQ (PReprocessing and INformation of SEQuence data)](http://prinseq.sourceforge.net/) is a tool used for filtering, formatting or trimming genome and metagenomic sequence data in fasta/fastq format. Moreover, PRINSEQ generates summary statistics of sequence and quality data.
 
 More information about the PRINSEQ program can be shown with:
 {{< highlight bash >}}
@@ -19,7 +19,7 @@ The basic usage of PRINSEQ for single-end data is:
 {{< highlight bash >}}
 $ prinseq-lite.pl [-fasta|-fastq] input_reads.[fasta|fastq] -out_format [1|2|3|4|5] [options]
 {{< /highlight >}}
-where **input_reads.[fasta|fastq]** is an input file of sequence data in fasta/fastq format, and **options** are additional parameters that can be found in the [PRINSEQ manual] (http://prinseq.sourceforge.net/manual.html).
+where **input_reads.[fasta|fastq]** is an input file of sequence data in fasta/fastq format, and **options** are additional parameters that can be found in the [PRINSEQ manual](http://prinseq.sourceforge.net/manual.html).
 
 The output format (`-out_format`) can be **1** (fasta only), **2** (fasta and qual), **3** (fastq), **4** (fastq and input fasta), and **5** (fastq, fasta and qual).
 
@@ -51,7 +51,7 @@ The basic usage of PRINSEQ for paired-end data is:
 {{< highlight bash >}}
 $ prinseq-lite.pl [-fasta|-fastq] input_reads_pair_1.[fasta|fastq] [-fasta2|-fastq2] input_reads_pair_2.[fasta|fastq] -out_format [1|2|3|4|5] [options]
 {{< /highlight >}}
-where **input_reads_pair_1.[fasta|fastq]** and **input_reads_pair_2.[fasta|fastq]** are pair 1 and pair 2 of the input files of sequence data in fasta/fastq format, and **options** are additional parameters that can be found in the the [PRINSEQ manual] (http://prinseq.sourceforge.net/manual.html).
+where **input_reads_pair_1.[fasta|fastq]** and **input_reads_pair_2.[fasta|fastq]** are pair 1 and pair 2 of the input files of sequence data in fasta/fastq format, and **options** are additional parameters that can be found in the [PRINSEQ manual](http://prinseq.sourceforge.net/manual.html).
 
 The output format (`-out_format`) can be **1** (fasta only), **2** (fasta and qual), **3** (fastq), **4** (fastq and input fasta), and **5** (fastq, fasta and qual).
 
diff --git a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/scythe.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/scythe.md
index 0b18b6cd09e57f8328c01a97a3e83f2af063def9..e907b01363814051449e623e54dd8f48ed7a0bad 100644
--- a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/scythe.md
+++ b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/scythe.md
@@ -5,7 +5,7 @@ weight = "10"
 +++
 
 
-[Scythe] (https://github.com/vsbuffalo/scythe) is a 3' end adapter trimmer that uses a Naive Bayesian approach to classify contaminant substrings in sequence reads. 3' ends often include poor quality bases which need to be removed prior the quality-based trimming, mapping, assemblies, and further analysis.
+[Scythe](https://github.com/vsbuffalo/scythe) is a 3' end adapter trimmer that uses a Naive Bayesian approach to classify contaminant substrings in sequence reads. 3' ends often include poor-quality bases which need to be removed prior to quality-based trimming, mapping, assembly, and further analysis.
 
 
 The basic usage of Scythe is:
diff --git a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/sickle.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/sickle.md
index 6062f2cc9e3e57c9ec20564647b2b260f527143b..7550b45007ddef8abed6be6d5a9ad2bc960ef5c4 100644
--- a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/sickle.md
+++ b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/sickle.md
@@ -5,7 +5,7 @@ weight = "10"
 +++
 
 
-[Sickle] (https://github.com/najoshi/sickle) is a windowed adaptive trimming tools for fastq files. Beside sliding window, Sickle uses quality and length thresholds to determine and trim low quality bases at both 3' end and 5' end of the reads.
+[Sickle](https://github.com/najoshi/sickle) is a windowed adaptive trimming tool for fastq files. Besides the sliding window, Sickle uses quality and length thresholds to determine and trim low-quality bases at both the 3' and 5' ends of the reads.
 
 
 Information about the Sickle command-line options can be shown by typing:
diff --git a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/tagcleaner.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/tagcleaner.md
index 05e1f358b844967c12a093b6bb8e1a68173969a4..eeb9a893dfdf80cfe7493d2b8991118592f4bcd5 100644
--- a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/tagcleaner.md
+++ b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/tagcleaner.md
@@ -5,13 +5,13 @@ weight = "10"
 +++
 
 
-[TagCleaner] (http://tagcleaner.sourceforge.net/) is a tool used to automatically detect and remove tag sequences from genomic and metagenomic sequence data. These additional tag sequences can contain deletions or insertions due to sequencing limitations.
+[TagCleaner](http://tagcleaner.sourceforge.net/) is a tool used to automatically detect and remove tag sequences from genomic and metagenomic sequence data. These additional tag sequences can contain deletions or insertions due to sequencing limitations.
 
 The basic usage of TagCleaner is:
 {{< highlight bash >}}
 $ tagcleaner.pl [-fasta|-fastq] input_reads.[fasta|fastq] [-predict|-tag3|-tag5] [options]
 {{< /highlight >}}
-where **input_reads.[fasta|fastq]** is an input file of sequence data in fasta/fastq format, and **options** are additional parameters that can be found in the [TagCleaner manual] (http://tagcleaner.sourceforge.net/manual.html).
+where **input_reads.[fasta|fastq]** is an input file of sequence data in fasta/fastq format, and **options** are additional parameters that can be found in the [TagCleaner manual](http://tagcleaner.sourceforge.net/manual.html).
 
 
 Required parameter for TagCleaner is the tag sequence. If the tag sequence is unknown, then the **-predict** option will provide the predicted tag sequence to the user. If the tag sequence is known and is found at the 3' end of the read, then the option **-tag3 &lt;tag_sequence&gt;** is used. If the tag sequence is known and is found at the 5' end of the read, the the option **-tag5 &lt;tag_sequence&gt;** is used.
diff --git a/content/applications/app_specific/bioinformatics_tools/reference_based_assembly_tools/cufflinks.md b/content/applications/app_specific/bioinformatics_tools/reference_based_assembly_tools/cufflinks.md
index cfde2a04e6c8998079bb19e1b6ef27e749136ce4..711471e185605e7611dcd1f04abacc6fb65d896f 100644
--- a/content/applications/app_specific/bioinformatics_tools/reference_based_assembly_tools/cufflinks.md
+++ b/content/applications/app_specific/bioinformatics_tools/reference_based_assembly_tools/cufflinks.md
@@ -5,7 +5,7 @@ weight = "10"
 +++
 
 
-[Cufflinks] (http://cole-trapnell-lab.github.io/cufflinks/) is a transcript assembly program that includes a number of tools for analyzing RNA-Seq data. These tools assemble aligned RNA-Seq reads into transcripts, estimate their abundances, test for differential expression and regulation, and provide transcript quantification. Some of the tools part of Cufflinks can be run individually, while others are part of a larger workflow.
+[Cufflinks](http://cole-trapnell-lab.github.io/cufflinks/) is a transcript assembly program that includes a number of tools for analyzing RNA-Seq data. These tools assemble aligned RNA-Seq reads into transcripts, estimate their abundances, test for differential expression and regulation, and provide transcript quantification. Some of the tools that are part of Cufflinks can be run individually, while others are part of a larger workflow.
 
 The basic usage of Cufflinks is:
 {{< highlight bash >}}
@@ -13,7 +13,7 @@ $ cufflinks [options] input_alignments.[sam|bam]
 {{< /highlight >}}
 where `input_alignments.[sam|bam]` is sorted input file of RNA-Seq read alignments in SAM/BAM format. The RNA-Seq read mapper TopHat/TopHat2 produces output in this format and is recommended to be used with Cufflinks, although SAM/BAM alignments produced from any aligner are accepted. 
 
-More advanced Cufflinks options can be found in [the manual] (http://cole-trapnell-lab.github.io/cufflinks/manual/) or by typing:
+More advanced Cufflinks options can be found in [the manual](http://cole-trapnell-lab.github.io/cufflinks/manual/) or by typing:
 {{< highlight bash >}}
 $ cufflinks -h
 {{< /highlight >}}
diff --git a/content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/cd_hit.md b/content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/cd_hit.md
index b752007b62b967ca8e85e0b643475404581f8c74..4fa711fc8dc746afa343a98d56ada2c21b43dc20 100644
--- a/content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/cd_hit.md
+++ b/content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/cd_hit.md
@@ -17,7 +17,7 @@ Some of the most frequently used executables from the CD-HIT package are: CD-HIT
 - CD-HIT-LAP identifies overlapping reads
 
 Detailed overview of the whole CD-HIT package and executables can be found in the 
-[CD-HIT user's guide] (http://weizhongli-lab.org/lab-wiki/doku.php?id=cd-hit-user-guide).
+[CD-HIT user's guide](http://weizhongli-lab.org/lab-wiki/doku.php?id=cd-hit-user-guide).
 
 
 The basic usage of CD-HIT is:
diff --git a/content/applications/app_specific/fortran_c_on_hcc.md b/content/applications/app_specific/fortran_c_on_hcc.md
index 3c41ebfc36f5be1d4428f2435e23c5da96971faa..877bd5d0a1752023658c974029264a21a0acaee4 100644
--- a/content/applications/app_specific/fortran_c_on_hcc.md
+++ b/content/applications/app_specific/fortran_c_on_hcc.md
@@ -102,7 +102,7 @@ Collection, `gcc`, for demonstration. Other available compilers such as
 line `module avail`.  Before compiling the code, make sure there is no 
 dependency on any numerical library in the code. If invoking a numerical 
 library is necessary, contact a HCC specialist 
-({{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)) to
+({{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu)) to
 discuss implementation options.
 
 {{< highlight bash >}}
diff --git a/content/applications/app_specific/running_gaussian_at_hcc.md b/content/applications/app_specific/running_gaussian_at_hcc.md
index b3d5bf65a32cd646c71849b9d9ccd3fbd6cf8a77..45aa43e6695ea8c60149a1b984a08f85a80508a6 100644
--- a/content/applications/app_specific/running_gaussian_at_hcc.md
+++ b/content/applications/app_specific/running_gaussian_at_hcc.md
@@ -19,7 +19,7 @@ Kearney are not allowed to run **g09** program at HCC without purchase
 of a **g09** license.
 
 For access, contact us at
- {{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
+ {{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu)
 and include your HCC username. After your account has been added to the
 group "*gauss*", here are four simple steps to run Gaussian 09 on Crane:
 
diff --git a/content/connecting/mobaxterm.md b/content/connecting/mobaxterm.md
index 0bed8e41c98cc7d63795b7bc009d6c5adfb36c0d..c774b70553935dc2f99c31afdce719fe067f0dbd 100644
--- a/content/connecting/mobaxterm.md
+++ b/content/connecting/mobaxterm.md
@@ -19,7 +19,7 @@ To connect to HCC resources using MobaXterm, open the application and select the
 Select SSH as the session type. Enter the cluster you are connecting to, in the example, `crane.unl.edu`, is used.  Check `Specify username`  and enter your HCC username in the the box. Note that &lt;username&gt;
 should be replaced by your HCC account username. If you do not have a
 HCC account, please contact a HCC specialist
-({{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu))
+({{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu))
 or go to https://hcc.unl.edu/newusers.
 To use the **Rhino** cluster, replace crane.unl.edu with with rhino.unl.edu.
 {{< figure src="/images/moba/session.png" height="450" >}}
diff --git a/content/connecting/putty.md b/content/connecting/putty.md
index 8f7b52df66c0217fc46a64b87f9e2b62b1b676bb..6bcfcd7e75d58443847b6d0be36adeeba390f164 100644
--- a/content/connecting/putty.md
+++ b/content/connecting/putty.md
@@ -53,7 +53,7 @@ you want to connect to the `Rhino` cluster.
 
 3.  On the third screen, enter your HCC account **username**. If you do
     not have a HCC account, please contact an HCC specialist
-    ({{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu))
+    ({{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu))
     or go to http://hcc.unl.edu/newusers.
 
     {{% notice info %}}Replace `cbohn` with your username.{{% /notice %}}
@@ -85,7 +85,7 @@ you want to connect to the `Rhino` cluster.
 8.  Check your smart phone for Duo login request. Press "Approve" if you
     can verify the request. If you find any Duo login request that is
     not initiated by yourself, deny it and report this incident
-    immediately to {{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu).
+    immediately to {{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu).
 
     {{< figure src="/images/8127263.png" height="450" >}}      
       
diff --git a/content/connecting/terminal.md b/content/connecting/terminal.md
index 785ac13b64324f80db8fe4503995c1b16edfe17c..ba30986d3f8b4e8e3c031dc70d2d4d5ab254a98f 100644
--- a/content/connecting/terminal.md
+++ b/content/connecting/terminal.md
@@ -7,7 +7,7 @@ weight = "10"
 ##### Use of Duo two-factor authentication is **required** to access HCC resources.
 ##### Please see [Setting up and Using Duo]({{< relref "setting_up_and_using_duo" >}}).
 ---
-- [Starting Terminal] (#starting-terminal)
+- [Starting Terminal](#starting-terminal)
   - [MacOS](#macos)
   - [Linux](#linux)
   - [Windows](#windows)
diff --git a/content/contact_us/_index.md b/content/contact_us/_index.md
index 2cf841eb83a18333b843e831b7eec379bae5eddb..62a87db661ade7d74e81edca951e338a0a92023c 100644
--- a/content/contact_us/_index.md
+++ b/content/contact_us/_index.md
@@ -5,7 +5,7 @@ weight = "100"
 +++
 
 If you have questions, please contact us at
-{{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
+{{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu)
 or stop by one of our locations.
 
 |                  Lincoln                        |                     Omaha          |
diff --git a/content/good_hcc_practices/_index.md b/content/good_hcc_practices/_index.md
index cd1c03dc6e89df2e5b2ad7fcc786d70300c8a68c..c7b07cad7793e65244a071388d4d11712056cd4b 100644
--- a/content/good_hcc_practices/_index.md
+++ b/content/good_hcc_practices/_index.md
@@ -73,4 +73,4 @@ and [mem_report]({{< relref "monitoring_jobs" >}}). While these tools can not pr
 useful information the researcher can use the next time that particular application is run.
 
 We strongly recommend you to read and follow this guidance. If you have any concerns about your workflows or need any 
-assistance, please contact HCC Support at {{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu).
+assistance, please contact HCC Support at {{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu).
diff --git a/content/handling_data/data_storage/linux_file_permissions.md b/content/handling_data/data_storage/linux_file_permissions.md
index 9a3d709945b45a42efe74850471671a34e7252e8..4739cbb2118ed0d8c939c87fbf026956cb1b9048 100644
--- a/content/handling_data/data_storage/linux_file_permissions.md
+++ b/content/handling_data/data_storage/linux_file_permissions.md
@@ -4,9 +4,9 @@ description = "How to view and change file permissions with Linux commands"
 weight = 20
 +++
 
-- [Opening a Terminal Window] (#opening-a-terminal-window)
-- [Listing File Permissions] (#listing-file-permissions)
-- [Changing File Permissions] (#changing-file-permissions)
+- [Opening a Terminal Window](#opening-a-terminal-window)
+- [Listing File Permissions](#listing-file-permissions)
+- [Changing File Permissions](#changing-file-permissions)
 
 ## Opening a Terminal Window
 -----------------------
diff --git a/content/intro/_index.md b/content/intro/_index.md
index bab84a3973a2f2d65dc91824add3d6e20fbb11d9..0ab86ab0e1d5f8c53a7e83e522f0fd8ab904243b 100644
--- a/content/intro/_index.md
+++ b/content/intro/_index.md
@@ -21,7 +21,7 @@ such as HTCondor or SLURM.
 
 #### Login Node:
 Users will automatically land on the login node when they log in to the clusters. 
-You will [submit jobs] ({{< ref "/submitting_jobs" >}}) using one of the schedulers 
+You will [submit jobs]({{< ref "/submitting_jobs" >}}) using one of the schedulers 
 and pull the results of your jobs. Any jobs running on the login node directly will be 
 stopped so others can use the login node to submit jobs.
 
diff --git a/content/submitting_jobs/partitions/_index.md b/content/submitting_jobs/partitions/_index.md
index 88466f8e78b589cddeeb0d27bc3db882dcfdb0a4..5ab7adae7774913ec95278c999e1a65966f84808 100644
--- a/content/submitting_jobs/partitions/_index.md
+++ b/content/submitting_jobs/partitions/_index.md
@@ -59,7 +59,7 @@ Partitions marked as owned by a group means only specific groups are
 allowed to submit jobs to that partition.  Groups are manually added to
 the list allowed to submit jobs to the partition.  If you are unable to
 submit jobs to a partition, and you feel that you should be, please
-contact {{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu).
+contact {{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu).
 
 ### Guest Partition