From 7399a2cf3f4aaddbf82b34e5d47b2d71b1af67d7 Mon Sep 17 00:00:00 2001
From: John Thiltges <jthiltges2@unl.edu>
Date: Wed, 15 Apr 2020 15:58:31 -0500
Subject: [PATCH] Correct markdown links (removing stray spaces) and switch renderer
 to Hugo default

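Hugo's default goldmark renderer follows CommonMark, which does not treat
"[text] (url)" as a link when a space separates the brackets from the
parenthesis, while the previous blackfriday renderer tolerated the space.
Remove the stray spaces throughout the content tree and re-enable goldmark
with unsafe = true so the raw HTML used by the docdock theme still renders.

A minimal sketch of how the affected links can be located and collapsed,
assuming GNU grep/sed (hypothetical; the actual edits may have been made
differently, and a blanket substitution would also touch "] (" occurrences
inside code blocks):

    # list candidate broken links in the markdown sources
    grep -rnF '] (' --include='*.md' content/
    # collapse "] (" to "](" in place across the content tree
    find content/ -name '*.md' -exec sed -i 's/] (/](/g' {} +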
---
 config.toml                                            |  8 ++------
 content/Events/2015/hcc_fall_kickstart_2015.md         |  2 +-
 content/FAQ/_index.md                                  | 10 +++++-----
 content/_footer.md                                     |  2 +-
 content/accounts/setting_up_and_using_duo.md           |  8 ++++----
 content/anvil/_index.md                                |  4 ++--
 content/anvil/available_images.md                      |  2 +-
 content/anvil/resizing_an_instance.md                  |  2 +-
 .../anvil/what_are_the_per_group_resource_limits.md    |  2 +-
 content/applications/app_specific/Jupyter.md           | 10 +++++-----
 .../allinea_performance_reports/_index.md              |  2 +-
 .../alignment_tools/blast/_index.md                    |  2 +-
 .../alignment_tools/blast/running_blast_alignment.md   |  8 ++++----
 .../bioinformatics_tools/alignment_tools/blat.md       |  2 +-
 .../bioinformatics_tools/alignment_tools/bowtie.md     |  4 ++--
 .../bioinformatics_tools/alignment_tools/bowtie2.md    |  4 ++--
 .../bioinformatics_tools/alignment_tools/bwa/_index.md |  2 +-
 .../alignment_tools/clustal_omega.md                   |  2 +-
 .../alignment_tools/tophat_tophat2.md                  |  4 ++--
 .../data_manipulation_tools/samtools/_index.md         |  4 ++--
 .../data_manipulation_tools/sratoolkit.md              |  2 +-
 .../de_novo_assembly_tools/oases.md                    |  4 ++--
 .../bioinformatics_tools/de_novo_assembly_tools/ray.md |  2 +-
 .../de_novo_assembly_tools/soapdenovo2.md              |  2 +-
 .../de_novo_assembly_tools/trinity/_index.md           |  2 +-
 .../de_novo_assembly_tools/velvet/_index.md            |  4 ++--
 .../pre_processing_tools/cutadapt.md                   |  2 +-
 .../pre_processing_tools/prinseq.md                    |  6 +++---
 .../pre_processing_tools/scythe.md                     |  2 +-
 .../pre_processing_tools/sickle.md                     |  2 +-
 .../pre_processing_tools/tagcleaner.md                 |  4 ++--
 .../reference_based_assembly_tools/cufflinks.md        |  4 ++--
 .../removing_detecting_redundant_sequences/cd_hit.md   |  2 +-
 content/applications/app_specific/fortran_c_on_hcc.md  |  2 +-
 .../app_specific/running_gaussian_at_hcc.md            |  2 +-
 content/connecting/mobaxterm.md                        |  2 +-
 content/connecting/putty.md                            |  4 ++--
 content/connecting/terminal.md                         |  2 +-
 content/contact_us/_index.md                           |  2 +-
 content/good_hcc_practices/_index.md                   |  2 +-
 .../data_storage/linux_file_permissions.md             |  6 +++---
 content/intro/_index.md                                |  2 +-
 content/submitting_jobs/partitions/_index.md           |  2 +-
 43 files changed, 72 insertions(+), 76 deletions(-)

diff --git a/config.toml b/config.toml
index 1d896582..5a883fd3 100644
--- a/config.toml
+++ b/config.toml
@@ -42,9 +42,5 @@ weight = 10
 
 # Hugo v0.60+ switched renderer from blackfriday to goldmark
 # - Allow unsafe for docdock template rendering
-#[markup.goldmark.renderer]
-#unsafe = true
-
-# FIXME: Use blackfriday renderer until we update the content to render with goldmark
-[markup]
-defaultMarkdownHandler = "blackfriday"
+[markup.goldmark.renderer]
+unsafe = true
diff --git a/content/Events/2015/hcc_fall_kickstart_2015.md b/content/Events/2015/hcc_fall_kickstart_2015.md
index 713201f4..eef8b2e4 100644
--- a/content/Events/2015/hcc_fall_kickstart_2015.md
+++ b/content/Events/2015/hcc_fall_kickstart_2015.md
@@ -15,7 +15,7 @@ Materials
 Software Carpentry
 Lessons: <a href="http://eharstad.github.io/2015-09-08-UNL/" class="external-link">http://eharstad.github.io/2015-09-08-UNL/</a>
 
-[Slides for Day 1] (https://unl.box.com/s/3tz0e3tbxzx9wt8s5l65e1w3fm1w4ojx) (Software Carpentry Introduction)
+[Slides for Day 1](https://unl.box.com/s/3tz0e3tbxzx9wt8s5l65e1w3fm1w4ojx) (Software Carpentry Introduction)
 
 [Slides for Day 2](https://unl.box.com/s/8ckxlt7f9geiphiomjodesx9a2nw664b) (Morning Session)
 
diff --git a/content/FAQ/_index.md b/content/FAQ/_index.md
index a130cf58..e653fe1f 100644
--- a/content/FAQ/_index.md
+++ b/content/FAQ/_index.md
@@ -57,7 +57,7 @@ they were at the time of our last backup. Please note that any changes
 made to the files between when the backup was made and when you deleted
 them will not be preserved. To have these files restored, please contact
 HCC Support at
-{{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
+{{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu)
 as soon as possible.
 
 **If the files were in your $WORK directory (/work/group/user/):** No.
@@ -78,7 +78,7 @@ Please stop by
 [our offices](http://hcc.unl.edu/location)
 along with a photo ID and we will be happy to activate it for you. If
 you are not local to Omaha or Lincoln, contact us at
-{{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
+{{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu)
 and we will help you activate Duo remotely.
 
 **If you have activated Duo previously but now have a different phone
@@ -90,7 +90,7 @@ Duo and update your account with your new phone number.
 **If you have activated Duo previously and have the same phone number:**
 
 Email us at
-{{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
+{{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu)
 from the email address your account is registered under and we will send
 you a new link that you can use to activate Duo.
 
@@ -132,7 +132,7 @@ is produced.
 **If you are running from inside your $WORK directory:**
 
 Contact us at
-{{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
+{{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu)
 with your login, the name of the cluster you are running on, and the
 full path to your submit script and we will be happy to help solve the
 issue.
@@ -160,7 +160,7 @@ For additional details on how to monitor usage on jobs, check out the
 documentation on [Monitoring Jobs]({{< relref "monitoring_jobs" >}}).
 
 If you continue to run into issues, please contact us at
-{{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
+{{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu)
 for additional assistance.
 
 #### I want to talk to a human about my problem. Can I do that?
diff --git a/content/_footer.md b/content/_footer.md
index 7a9bc1ef..19f57989 100644
--- a/content/_footer.md
+++ b/content/_footer.md
@@ -2,6 +2,6 @@
   title = "Footer"
 +++
 
-{{< icon name="copyright-mark" >}} [Holland Computing Center] (https://hcc.unl.edu) | 118 Schorr Center, Lincoln NE 68588 | {{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu) | {{< icon name="phone-alt" >}}402-472-5041
+{{< icon name="copyright-mark" >}} [Holland Computing Center](https://hcc.unl.edu) | 118 Schorr Center, Lincoln NE 68588 | {{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu) | {{< icon name="phone-alt" >}}402-472-5041
 
 See something wrong?  Help us fix it by [contributing](https://git.unl.edu/hcc/hcc-docs/blob/master/CONTRIBUTING.md)!
diff --git a/content/accounts/setting_up_and_using_duo.md b/content/accounts/setting_up_and_using_duo.md
index 723c840f..25d013cc 100644
--- a/content/accounts/setting_up_and_using_duo.md
+++ b/content/accounts/setting_up_and_using_duo.md
@@ -35,13 +35,13 @@ If you *are not* currently using Duo with your TrueYou account:
     Faculty/staff members with a verified NU telephone number can enroll by
     phone. If you would like an HCC staff member to call your NU telephone
     number to enroll, please email
-    {{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
+    {{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu)
     with a time you will be available.
 
 If you *are* currently using Duo with your TrueYou account:
 
 1.  You can request to use the same phone for HCC's Duo as you are using for TrueYou.
-    Please contact [hcc-support@unl.edu] (mailto:hcc-support@unl.edu) with the request
+    Please contact [hcc-support@unl.edu](mailto:hcc-support@unl.edu) with the request
     using the email address associated with your TrueYou account. In the email, include
     the last 4 digits of the phone number for verification.
 
@@ -72,7 +72,7 @@ exactly as before.
 
 After 10 failed authentication attempts, the user's account is
 disabled. If this is the case, then the user needs to send an email to
-[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
+[hcc-support@unl.edu](mailto:hcc-support@unl.edu)
 including his/her username and the reason why multiple failed
 authentication attempts occurred.  
 {{% /notice %}}
@@ -92,7 +92,7 @@ Simply tap `Approve` to verify the login.
 
 {{% notice warning%}}**If you receive a verification request you didn't initiate, deny the 
 request and contact HCC immediately via email at
-[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)**
+[hcc-support@unl.edu](mailto:hcc-support@unl.edu)**
 {{% /notice %}}
 
 In the terminal, the login will now complete and the user will logged in
diff --git a/content/anvil/_index.md b/content/anvil/_index.md
index bd4fc812..f0ba40a5 100644
--- a/content/anvil/_index.md
+++ b/content/anvil/_index.md
@@ -173,7 +173,7 @@ precious or irreproducible data should not be placed or left on Anvil**.
     subscription is required.  As part of HCC's Globus Provider Plan,
     HCC can provide this on a per-user basis free of charge.  If you are
     interested in Globus Plus, please email
-    {{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
+    {{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu)
     with your request and a brief explanation.
 
 ## Backups
@@ -192,6 +192,6 @@ disaster recovery backups should not be the only source of backups for
 important data. The backup policies are subject to change without prior
 notice. To retrieve your backups, please contact HCC. If you have
 special concerns please contact us at
-{{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu).
+{{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu).
 
 
diff --git a/content/anvil/available_images.md b/content/anvil/available_images.md
index 66e47a2b..6f4493dd 100644
--- a/content/anvil/available_images.md
+++ b/content/anvil/available_images.md
@@ -13,5 +13,5 @@ list of available images.
 {{< /sorttable >}}
 
 Additional images can be produced by HCC staff by request at
-{{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu).
+{{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu).
  
diff --git a/content/anvil/resizing_an_instance.md b/content/anvil/resizing_an_instance.md
index fab02357..a0461e1f 100644
--- a/content/anvil/resizing_an_instance.md
+++ b/content/anvil/resizing_an_instance.md
@@ -63,4 +63,4 @@ At this point, your instance should have a status of *Active*. Connect
 to your instance and ensure that everything works as expected.
 
 {{% notice warning %}}
-If your instance ends in an "Error" state or you have any issues or questions with resizing, please contact us at [hcc-support@unl.edu] (mailto:hcc-support@unl.edu) for assistance.{{% /notice %}}
+If your instance ends in an "Error" state or you have any issues or questions with resizing, please contact us at [hcc-support@unl.edu](mailto:hcc-support@unl.edu) for assistance.{{% /notice %}}
diff --git a/content/anvil/what_are_the_per_group_resource_limits.md b/content/anvil/what_are_the_per_group_resource_limits.md
index 4a44f23b..a2cc67fc 100644
--- a/content/anvil/what_are_the_per_group_resource_limits.md
+++ b/content/anvil/what_are_the_per_group_resource_limits.md
@@ -26,6 +26,6 @@ resources, that can be accommodated on a fee basis, depending on the
 amount needed.  Please see the HCC
 [Priority Access Pricing](http://hcc.unl.edu/priority-access-pricing) page for specific costs.
 *By default, no public IP addresses are provided.*
-Please contact {{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
+Please contact {{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu)
 for more details.
 
diff --git a/content/applications/app_specific/Jupyter.md b/content/applications/app_specific/Jupyter.md
index d738c1c9..135e3e82 100644
--- a/content/applications/app_specific/Jupyter.md
+++ b/content/applications/app_specific/Jupyter.md
@@ -4,10 +4,10 @@ description = "How to access and use a Jupyter Notebook"
 weight = 20
 +++
 
-- [Connecting to JupyterHub] (#connecting-to-jupyterhub)
-- [Running Code] (#running-code)
-- [Opening a Terminal] (#opening-a-terminal)
-- [Using Custom Packages] (#using-custom-packages)
+- [Connecting to JupyterHub](#connecting-to-jupyterhub)
+- [Running Code](#running-code)
+- [Opening a Terminal](#opening-a-terminal)
+- [Using Custom Packages](#using-custom-packages)
 
 ## Connecting to JupyterHub
 -----------------------
@@ -42,7 +42,7 @@ Select the other options based on your computing needs. Note that a SLURM Job wi
 
 1.	From your user home page, select "terminal" from the "New" drop-down menu.
 {{< figure src="/images/jupyterTerminal.png">}}
-2.	A terminal opens in a new tab. You can enter [Linux commands] ({{< relref "basic_linux_commands" >}})
+2.	A terminal opens in a new tab. You can enter [Linux commands]({{< relref "basic_linux_commands" >}})
  at the prompt.
 {{< figure src="/images/jupyterTerminal2.png">}}
 
diff --git a/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/_index.md b/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/_index.md
index aa907605..c3cb52f9 100644
--- a/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/_index.md
+++ b/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/_index.md
@@ -46,7 +46,7 @@ The basic usage of ``perf-report` is:
 {{< highlight bash >}}
 perf-report [OPTION...] PROGRAM [PROGRAM_ARGS]
 or
-perf-report [OPTION...] (mpirun|mpiexec|aprun|...) [MPI_ARGS] PROGRAM [PROGRAM_ARGS]
+perf-report [OPTION...](mpirun|mpiexec|aprun|...) [MPI_ARGS] PROGRAM [PROGRAM_ARGS]
 {{< /highlight >}}
 {{% /panel %}}
 
diff --git a/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/_index.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/_index.md
index 3fbde63b..c37870f7 100644
--- a/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/_index.md
+++ b/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/_index.md
@@ -5,7 +5,7 @@ weight = "52"
 +++
 
 
-[BLAST] (https://blast.ncbi.nlm.nih.gov/Blast.cgi) is a local alignment tool that finds similarity between sequences. This tool compares nucleotide or protein sequences to sequence databases, and calculates significance of matches. Sometimes these input sequences are large and using the command-line BLAST is required.
+[BLAST](https://blast.ncbi.nlm.nih.gov/Blast.cgi) is a local alignment tool that finds similarity between sequences. This tool compares nucleotide or protein sequences to sequence databases, and calculates significance of matches. Sometimes these input sequences are large and using the command-line BLAST is required.
 
 The following pages, [Create Local BLAST Database]({{<relref "create_local_blast_database" >}}) and [Running BLAST Alignment]({{<relref "running_blast_alignment" >}}) describe how to run some of the most common BLAST executables as a single job using the SLURM scheduler on HCC.
 
diff --git a/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/running_blast_alignment.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/running_blast_alignment.md
index 936c2ebb..e4de1150 100644
--- a/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/running_blast_alignment.md
+++ b/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/running_blast_alignment.md
@@ -20,7 +20,7 @@ $ blastn -query input_reads.fasta -db input_reads_db -out blastn_output.alignmen
 {{< /highlight >}}
 where **input_reads.fasta** is an input file of sequence data in fasta format, **input_reads_db** is the generated BLAST database, and **blastn_output.alignments** is the output file where the alignments are stored.
 
-Additional parameters can be found in the [BLAST manual] (https://www.ncbi.nlm.nih.gov/books/NBK279690/), or by typing:
+Additional parameters can be found in the [BLAST manual](https://www.ncbi.nlm.nih.gov/books/NBK279690/), or by typing:
 {{< highlight bash >}}
 $ blastn -help
 {{< /highlight >}}
@@ -28,7 +28,7 @@ $ blastn -help
 These BLAST alignment commands are multi-threaded, and therefore using the BLAST option **-num_threads <number_of_CPUs>** is recommended.
 
 
-HCC hosts multiple BLAST databases and indices on Crane. In order to use these resources, the ["biodata" module] ({{<relref "/applications/app_specific/bioinformatics_tools/biodata_module">}}) needs to be loaded first. The **$BLAST** variable contains the following currently available databases:
+HCC hosts multiple BLAST databases and indices on Crane. In order to use these resources, the ["biodata" module]({{<relref "/applications/app_specific/bioinformatics_tools/biodata_module">}}) needs to be loaded first. The **$BLAST** variable contains the following currently available databases:
 
 - **16SMicrobial**
 - **nr**
@@ -37,7 +37,7 @@ HCC hosts multiple BLAST databases and indices on Crane. In order to use these r
 - **refseq_rna**
 - **swissprot**
 
-If you want to create and use a BLAST database that is not mentioned above, check [Create Local BLAST Database]({{<relref "create_local_blast_database" >}}). If you want a database to be added to the ["biodata" module] ({{<relref "/applications/app_specific/bioinformatics_tools/biodata_module">}}), please send a request to bcrf-support@unl.edu.
+If you want to create and use a BLAST database that is not mentioned above, check [Create Local BLAST Database]({{<relref "create_local_blast_database" >}}). If you want a database to be added to the ["biodata" module]({{<relref "/applications/app_specific/bioinformatics_tools/biodata_module">}}), please send a request to bcrf-support@unl.edu.
 
 {{% notice info %}}
 **To access the older format of BLAST databases that work with BLAST+ 2.9 and lower, please use the variable BLAST_V4.**
@@ -81,7 +81,7 @@ $ blastn -query input_reads.fasta -db input_reads_db -out blastn_output.alignmen
 {{< /highlight >}}
 
 
-The default BLAST output is in pairwise format. However, BLAST’s parameter **-outfmt** supports output in [different formats] (https://www.ncbi.nlm.nih.gov/books/NBK279684/) that are easier for parsing.
+The default BLAST output is in pairwise format. However, BLAST’s parameter **-outfmt** supports output in [different formats](https://www.ncbi.nlm.nih.gov/books/NBK279684/) that are easier for parsing.
 
 
 Basic SLURM example of protein BLAST run against the non-redundant **nr **BLAST database with tabular output format and `8 CPUs` is shown below. Similarly as before, the query and database files are copied to the **/scratch/** directory. The BLAST output is also saved in this directory (**/scratch/blastx_output.alignments**). After BLAST finishes, the output file is copied from the worker node to your current work directory.
diff --git a/content/applications/app_specific/bioinformatics_tools/alignment_tools/blat.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/blat.md
index 5f2c3fb4..9113bca2 100644
--- a/content/applications/app_specific/bioinformatics_tools/alignment_tools/blat.md
+++ b/content/applications/app_specific/bioinformatics_tools/alignment_tools/blat.md
@@ -15,7 +15,7 @@ $ blat database query output_alignment.txt [options]
 where **database** is the name of the database used for the alignment, **query** is the name of the input file of sequence data in `fasta/nib/2bit` format, and **output_alignment.txt** is the output alignment file.
 
 
-Additional parameters for BLAT alignment can be found in the [manual] (http://genome.ucsc.edu/FAQ/FAQblat), or by using:
+Additional parameters for BLAT alignment can be found in the [manual](http://genome.ucsc.edu/FAQ/FAQblat), or by using:
 {{< highlight bash >}}
 $ blat
 {{< /highlight >}}
diff --git a/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie.md
index 7a0670a6..41aaf03d 100644
--- a/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie.md
+++ b/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie.md
@@ -5,7 +5,7 @@ weight = "10"
 +++
 
 
-[Bowtie] (http://bowtie-bio.sourceforge.net/index.shtml) is an ultrafast and memory-efficient aligner for large sets of sequencing reads to a reference genome. Bowtie indexes the genome with a Burrows-Wheeler index to keep its memory footprint small. Bowtie also supports usage of multiple processors to achieve greater alignment speed.
+[Bowtie](http://bowtie-bio.sourceforge.net/index.shtml) is an ultrafast and memory-efficient aligner for large sets of sequencing reads to a reference genome. Bowtie indexes the genome with a Burrows-Wheeler index to keep its memory footprint small. Bowtie also supports usage of multiple processors to achieve greater alignment speed.
 
 
 The first and basic step of running Bowtie is to build and format an index from the reference genome. The basic usage of this command, **bowtie-build** is:
@@ -19,7 +19,7 @@ After the index of the reference genome is generated, the next step is to align
 $ bowtie [-q|-f|-r|-c] index_prefix [-1 input_reads_pair_1.[fasta|fastq] -2 input_reads_pair_2.[fasta|fastq] | input_reads.[fasta|fastq]] [options]
 {{< /highlight >}}
 where **index_prefix** is the generated index using the **bowtie-build** command, and **options** are optional parameters that can be found in the [Bowtie
-manual] (http://bowtie-bio.sourceforge.net/manual.shtml).
+manual](http://bowtie-bio.sourceforge.net/manual.shtml).
 
 
 Bowtie supports both single-end (`input_reads.[fasta|fastq]`) and paired-end (`input_reads_pair_1.[fasta|fastq]`, `input_reads_pair_2.[fasta|fastq]`) files in fasta or fastq format. The format of the input files also needs to be specified by using the following flags: **-q** (fastq files), **-f** (fasta files), **-r** (raw one-sequence per line), or **-c** (sequences given on command line).
diff --git a/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie2.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie2.md
index 2fcb2817..8d091433 100644
--- a/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie2.md
+++ b/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie2.md
@@ -5,7 +5,7 @@ weight = "10"
 +++
 
 
-[Bowtie2] (http://bowtie-bio.sourceforge.net/bowtie2/index.shtml) is an ultrafast and memory-efficient tool for aligning sequencing reads to long reference sequences. Although Bowtie and Bowtie2 are both fast read aligners, there are few main differences between them:
+[Bowtie2](http://bowtie-bio.sourceforge.net/bowtie2/index.shtml) is an ultrafast and memory-efficient tool for aligning sequencing reads to long reference sequences. Although Bowtie and Bowtie2 are both fast read aligners, there are few main differences between them:
 
 - Bowtie2 supports gapped alignment with affine gap penalties, without restrictions on the number of gaps and gap lengths.
 - Bowtie supports reads longer than 50bp and is generally faster, more sensitive, and uses less memory than Bowtie.
@@ -28,7 +28,7 @@ The command **bowtie2** takes a Bowtie2 index and set of sequencing read files a
 {{< highlight bash >}}
 $ bowtie2 -x index_prefix [-q|--qseq|-f|-r|-c] [-1 input_reads_pair_1.[fasta|fastq] -2 input_reads_pair_2.[fasta|fastq] | -U input_reads.[fasta|fastq]] -S bowtie2_alignments.sam [options]
 {{< /highlight >}}
-where **index_prefix** is the generated index using the **bowtie2-build** command, and **options** are optional parameters that can be found in the [Bowtie2 manual] (http://bowtie-bio.sourceforge.net/bowtie2/manual.shtml). Bowtie2 supports both single-end (`input_reads.[fasta|fastq]`) and paired-end (`input_reads_pair_1.[fasta|fastq]`, `input_reads_pair_2.[fasta|fastq]`) files in fasta or fastq format. The format of the input files also needs to be specified by using one of the following flags: **-q** (fastq files), **--qseq** (Illumina's qseq format), **-f** (fasta files), **-r** (raw one sequence per line), or **-c** (sequences given on command line).
+where **index_prefix** is the generated index using the **bowtie2-build** command, and **options** are optional parameters that can be found in the [Bowtie2 manual](http://bowtie-bio.sourceforge.net/bowtie2/manual.shtml). Bowtie2 supports both single-end (`input_reads.[fasta|fastq]`) and paired-end (`input_reads_pair_1.[fasta|fastq]`, `input_reads_pair_2.[fasta|fastq]`) files in fasta or fastq format. The format of the input files also needs to be specified by using one of the following flags: **-q** (fastq files), **--qseq** (Illumina's qseq format), **-f** (fasta files), **-r** (raw one sequence per line), or **-c** (sequences given on command line).
 
 
 An example of how to run Bowtie2 local alignment on Crane with paired-end fasta files and `8 CPUs` is shown below:
diff --git a/content/applications/app_specific/bioinformatics_tools/alignment_tools/bwa/_index.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/bwa/_index.md
index 5b770061..0b35ec8d 100644
--- a/content/applications/app_specific/bioinformatics_tools/alignment_tools/bwa/_index.md
+++ b/content/applications/app_specific/bioinformatics_tools/alignment_tools/bwa/_index.md
@@ -33,7 +33,7 @@ For detailed description and more information on a specific command, just type:
 {{< highlight bash >}}
 $  bwa COMMAND
 {{< /highlight >}}
-or check the [BWA manual] (http://bio-bwa.sourceforge.net/bwa.shtml).
+or check the [BWA manual](http://bio-bwa.sourceforge.net/bwa.shtml).
 
 
 The page [Running BWA Commands]({{<relref "running_bwa_commands" >}}) shows how to run BWA on HCC.
diff --git a/content/applications/app_specific/bioinformatics_tools/alignment_tools/clustal_omega.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/clustal_omega.md
index 94fbd172..6aeebfae 100644
--- a/content/applications/app_specific/bioinformatics_tools/alignment_tools/clustal_omega.md
+++ b/content/applications/app_specific/bioinformatics_tools/alignment_tools/clustal_omega.md
@@ -5,7 +5,7 @@ weight = "10"
 +++
 
 
-[Clustal Omega] (http://www.clustal.org/omega/) is a general purpose multiple sequence alignment (MSA) tool used mainly with protein, as well as DNA and RNA sequences. Clustal Omega is fast and scalable aligner that can align datasets of hundreds of thousands of sequences in reasonable time.
+[Clustal Omega](http://www.clustal.org/omega/) is a general purpose multiple sequence alignment (MSA) tool used mainly with protein, as well as DNA and RNA sequences. Clustal Omega is fast and scalable aligner that can align datasets of hundreds of thousands of sequences in reasonable time.
 
 
 The general usage of Clustal Omega is:
diff --git a/content/applications/app_specific/bioinformatics_tools/alignment_tools/tophat_tophat2.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/tophat_tophat2.md
index 05767431..b8047568 100644
--- a/content/applications/app_specific/bioinformatics_tools/alignment_tools/tophat_tophat2.md
+++ b/content/applications/app_specific/bioinformatics_tools/alignment_tools/tophat_tophat2.md
@@ -5,7 +5,7 @@ weight = "10"
 +++
 
 
-[TopHat] (https://ccb.jhu.edu/software/tophat/index.shtml) is a fast splice junction mapper for RNA-Seq data. It first aligns RNA-Seq reads to reference genomes using the ultra high-throughput short read aligner Bowtie, and then analyzes the mapping results to identify splice junctions between exons. 
+[TopHat](https://ccb.jhu.edu/software/tophat/index.shtml) is a fast splice junction mapper for RNA-Seq data. It first aligns RNA-Seq reads to reference genomes using the ultra high-throughput short read aligner Bowtie, and then analyzes the mapping results to identify splice junctions between exons. 
 
 Although there is no difference between the available options for both TopHat and TopHat2 and the number of output files, TopHat2 incorporates many significant improvements to TopHat. The TopHat package at HCC supports both **tophat** and **tophat2**.
 
@@ -19,7 +19,7 @@ where **index_prefix** is the basename of the genome index to be searched. This
 TopHat2 uses single or comma-separated list of paired-end and single-end reads in fasta or fastq format. The single-end reads need to be provided after the paired-end reads.
 
 
-More advanced TopHat2 options can be found in [its manual] (https://ccb.jhu.edu/software/tophat/manual.shtml), or by typing:
+More advanced TopHat2 options can be found in [its manual](https://ccb.jhu.edu/software/tophat/manual.shtml), or by typing:
 {{< highlight bash >}}
 $ tophat2 -h
 {{< /highlight >}}
diff --git a/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/samtools/_index.md b/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/samtools/_index.md
index df9a00e2..a3887e18 100644
--- a/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/samtools/_index.md
+++ b/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/samtools/_index.md
@@ -5,7 +5,7 @@ weight = "52"
 +++
 
 
-The SAM format is a standard format for storing large nucleotide sequence alignments. The BAM format is just the binary form from SAM. [SAMtools] (http://www.htslib.org/) is a toolkit for manipulating alignments in SAM/BAM format, including sorting, merging, indexing and generating alignments in a per-position format.
+The SAM format is a standard format for storing large nucleotide sequence alignments. The BAM format is just the binary form from SAM. [SAMtools](http://www.htslib.org/) is a toolkit for manipulating alignments in SAM/BAM format, including sorting, merging, indexing and generating alignments in a per-position format.
 
 
 The basic usage of SAMtools is:
@@ -39,7 +39,7 @@ For detailed description and more information on a specific command, just type:
 {{< highlight bash >}}
 $ samtools COMMAND
 {{< /highlight >}}
-or check the [SAMtools manual] (http://www.htslib.org/doc/samtools.html).
+or check the [SAMtools manual](http://www.htslib.org/doc/samtools.html).
 
 
 The page [Running SAMtools Commands]({{<relref "running_samtools_commands" >}}) shows how to run SAMtools on HCC.
diff --git a/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/sratoolkit.md b/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/sratoolkit.md
index 3cfaa501..d2704b9f 100644
--- a/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/sratoolkit.md
+++ b/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/sratoolkit.md
@@ -5,7 +5,7 @@ weight = "10"
 +++
 
 
-[SRA (Sequence Read Archive)] (http://www.ncbi.nlm.nih.gov/sra) is an NCBI-defined format for NGS data. Every data submitted to NCBI needs to be in SRA format. The SRA Toolkit provides tools for converting different formats of data into SRA format, and vice versa, extracting SRA data in other different formats.
+[SRA (Sequence Read Archive)](http://www.ncbi.nlm.nih.gov/sra) is an NCBI-defined format for NGS data. Every data submitted to NCBI needs to be in SRA format. The SRA Toolkit provides tools for converting different formats of data into SRA format, and vice versa, extracting SRA data in other different formats.
 
 The SRA Toolkit allows converting data from the SRA format to the following formats: `ABI SOLiD native`, `fasta`, `fastq`, `sff`, `sam`, and `Illumina native`. Also, the SRA Toolkit allows converting data from `fasta`, `fastq`, `AB SOLiD-SRF`, `AB SOLiD-native`, `Illumina SRF`, `Illumina native`, `sff`, and `bam` format into the SRA format.
 
diff --git a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/oases.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/oases.md
index 7f29d4a3..82278e88 100644
--- a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/oases.md
+++ b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/oases.md
@@ -5,7 +5,7 @@ weight = "10"
 +++
 
 
-Velvet by itself generates assembled contigs for DNA data. However, using the Oases extension for Velvet, a transcriptome assembly can be produced. [Oases] (https://www.ebi.ac.uk/~zerbino/oases/) is an extension of Velvet for generating de novo assembly for RNA-Seq data. Oases uses the preliminary assembly produced by Velvet as an input, and constructs transcripts.
+Velvet by itself generates assembled contigs for DNA data. However, using the Oases extension for Velvet, a transcriptome assembly can be produced. [Oases](https://www.ebi.ac.uk/~zerbino/oases/) is an extension of Velvet for generating de novo assembly for RNA-Seq data. Oases uses the preliminary assembly produced by Velvet as an input, and constructs transcripts.
 
 
 In order to be able to run Oases, after `velveth`, `velvetg` needs to be run with the `–read_trkg yes` option:
@@ -22,7 +22,7 @@ contigs.fa  Graph2  LastGraph  Log  PreGraph  Roadmaps  Sequences  stats.txt
 {{% /panel %}}
 
 
-Oases has a lot of parameters that can be found in its [manual] (https://www.ebi.ac.uk/~zerbino/oases/OasesManual.pdf). While Velvet is multi-threaded, Oases is not.
+Oases has a lot of parameters that can be found in its [manual](https://www.ebi.ac.uk/~zerbino/oases/OasesManual.pdf). While Velvet is multi-threaded, Oases is not.
 
 
 A simple SLURM script to run Oases on the Velvet output stored in `output_directory/` with minimum transcript length of `200` is shown below:
diff --git a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/ray.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/ray.md
index 795d1623..7e57db1f 100644
--- a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/ray.md
+++ b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/ray.md
@@ -5,7 +5,7 @@ weight = "10"
 +++
 
 
-[Ray] (http://denovoassembler.sourceforge.net/) is a de novo de Bruijn genome assembler that works with next-generation sequencing data (Illumina, 454, SOLiD). Ray is scalable and parallel software that takes advantage of multiple nodes and multiple CPUs using MPI (message passing interface).
+[Ray](http://denovoassembler.sourceforge.net/) is a de novo de Bruijn genome assembler that works with next-generation sequencing data (Illumina, 454, SOLiD). Ray is scalable and parallel software that takes advantage of multiple nodes and multiple CPUs using MPI (message passing interface).
 
 
 Ray can be used for building multiple applications:
diff --git a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/soapdenovo2.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/soapdenovo2.md
index 30b83f56..d4248506 100644
--- a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/soapdenovo2.md
+++ b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/soapdenovo2.md
@@ -5,7 +5,7 @@ weight = "10"
 +++
 
 
-[SOAPdenovo] (http://soap.genomics.org.cn/soapdenovo.html) is a de novo genome assembler for short reads. It is specially designed for Illumina GA short reads and large plant and animal genomes. SOAPdenovo2 is a newer version of SOAPdenovo with improved algorithm that reduces memory consumption, resolves more repeat regions, increases coverage, and optimizes the assembly for large genomes.
+[SOAPdenovo](http://soap.genomics.org.cn/soapdenovo.html) is a de novo genome assembler for short reads. It is specially designed for Illumina GA short reads and large plant and animal genomes. SOAPdenovo2 is a newer version of SOAPdenovo with improved algorithm that reduces memory consumption, resolves more repeat regions, increases coverage, and optimizes the assembly for large genomes.
 
 SOAPdenovo2 has two commands, **SOAPdenovo-63mer** and **SOAPdenovo-127mer**. The first one is suitable for assembly with k-mer values less than 63 bp, requires less memory and runs faster. The latter one works for k-mer values less than 127 bp.
 
diff --git a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/trinity/_index.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/trinity/_index.md
index 3147c639..09067c00 100644
--- a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/trinity/_index.md
+++ b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/trinity/_index.md
@@ -5,7 +5,7 @@ weight = "52"
 +++
 
 
-[Trinity] (https://github.com/trinityrnaseq/trinityrnaseq/wiki) is a method for efficient and robust de novo reconstruction of transcriptomes from RNA-Seq data. Trinity combines three independent software modules: `Inchworm`, `Chrysalis`, and `Butterfly`. All these modules can be applied sequentially to process large RNA-Seq datasets.
+[Trinity](https://github.com/trinityrnaseq/trinityrnaseq/wiki) is a method for efficient and robust de novo reconstruction of transcriptomes from RNA-Seq data. Trinity combines three independent software modules: `Inchworm`, `Chrysalis`, and `Butterfly`. All these modules can be applied sequentially to process large RNA-Seq datasets.
 
 
 The basic usage of Trinity is:
diff --git a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/_index.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/_index.md
index 427424d7..0c14aea5 100644
--- a/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/_index.md
+++ b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/_index.md
@@ -5,9 +5,9 @@ weight = "52"
 +++
 
 
-[Velvet] (https://www.ebi.ac.uk/~zerbino/velvet/) is a general sequence assembler designed to produce assembly from short, as well as long reads. Running Velvet consists of a sequence of two commands **velveth** and **velvetg**. **velveth** produces a hash table of k-mers, while **velvetg** constructs the genome assembly. The k-mer length, also known as hash length corresponds to the length, in base pairs, of the words of the reads being hashed.
+[Velvet](https://www.ebi.ac.uk/~zerbino/velvet/) is a general sequence assembler designed to produce assembly from short, as well as long reads. Running Velvet consists of a sequence of two commands **velveth** and **velvetg**. **velveth** produces a hash table of k-mers, while **velvetg** constructs the genome assembly. The k-mer length, also known as hash length corresponds to the length, in base pairs, of the words of the reads being hashed.
 
-Velvet has lots of parameters that can be found in its [manual] (https://www.ebi.ac.uk/~zerbino/velvet/Manual.pdf). However, the k-mer value is crucial in obtaining optimal assemblies. Higher k-mer values increase the specificity, and lower k-mer values increase the sensitivity.
+Velvet has lots of parameters that can be found in its [manual](https://www.ebi.ac.uk/~zerbino/velvet/Manual.pdf). However, the k-mer value is crucial in obtaining optimal assemblies. Higher k-mer values increase the specificity, and lower k-mer values increase the sensitivity.
 
 Velvet supports multiple file formats: `fasta`, `fastq`, `fasta.gz`, `fastq.gz`, `sam`, `bam`, `eland`, `gerald`. Velvet also supports different read categories for different sequencing technologies and libraries, e.g. `short`, `shortPaired`, `short2`, `shortPaired2`, `long`, `longPaired`.
 
diff --git a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/cutadapt.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/cutadapt.md
index 779a7e64..6f8770f0 100644
--- a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/cutadapt.md
+++ b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/cutadapt.md
@@ -5,7 +5,7 @@ weight = "10"
 +++
 
 
-[Cutadapt] (https://cutadapt.readthedocs.io/en/stable/index.html) is a tool for removing adapter sequences from DNA sequencing data. Although most of the adapters are located at the 3' end of the sequencing read, Cutadapt allows multiple adapter removal from both 3' and 5' ends.
+[Cutadapt](https://cutadapt.readthedocs.io/en/stable/index.html) is a tool for removing adapter sequences from DNA sequencing data. Although most of the adapters are located at the 3' end of the sequencing read, Cutadapt allows multiple adapter removal from both 3' and 5' ends.
 
 
 The basic usage of Cutadapt is:
diff --git a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/prinseq.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/prinseq.md
index 26a236f4..09d2fd44 100644
--- a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/prinseq.md
+++ b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/prinseq.md
@@ -5,7 +5,7 @@ weight = "10"
 +++
 
 
-[PRINSEQ (PReprocessing and INformation of SEQuence data)] (http://prinseq.sourceforge.net/) is a tool used for filtering, formatting or trimming genome and metagenomic sequence data in fasta/fastq format. Moreover, PRINSEQ generates summary statistics of sequence and quality data.
+[PRINSEQ (PReprocessing and INformation of SEQuence data)](http://prinseq.sourceforge.net/) is a tool used for filtering, formatting or trimming genome and metagenomic sequence data in fasta/fastq format. Moreover, PRINSEQ generates summary statistics of sequence and quality data.
 
 More information about the PRINSEQ program can be shown with:
 {{< highlight bash >}}
@@ -19,7 +19,7 @@ The basic usage of PRINSEQ for single-end data is:
 {{< highlight bash >}}
 $ prinseq-lite.pl [-fasta|-fastq] input_reads.[fasta|fastq] -out_format [1|2|3|4|5] [options]
 {{< /highlight >}}
-where **input_reads.[fasta|fastq]** is an input file of sequence data in fasta/fastq format, and **options** are additional parameters that can be found in the [PRINSEQ manual] (http://prinseq.sourceforge.net/manual.html).
+where **input_reads.[fasta|fastq]** is an input file of sequence data in fasta/fastq format, and **options** are additional parameters that can be found in the [PRINSEQ manual](http://prinseq.sourceforge.net/manual.html).
 
 The output format (`-out_format`) can be **1** (fasta only), **2** (fasta and qual), **3** (fastq), **4** (fastq and input fasta), and **5** (fastq, fasta and qual).
 
@@ -51,7 +51,7 @@ The basic usage of PRINSEQ for paired-end data is:
 {{< highlight bash >}}
 $ prinseq-lite.pl [-fasta|-fastq] input_reads_pair_1.[fasta|fastq] [-fasta2|-fastq2] input_reads_pair_2.[fasta|fastq] -out_format [1|2|3|4|5] [options]
 {{< /highlight >}}
-where **input_reads_pair_1.[fasta|fastq]** and **input_reads_pair_2.[fasta|fastq]** are pair 1 and pair 2 of the input files of sequence data in fasta/fastq format, and **options** are additional parameters that can be found in the the [PRINSEQ manual] (http://prinseq.sourceforge.net/manual.html).
+where **input_reads_pair_1.[fasta|fastq]** and **input_reads_pair_2.[fasta|fastq]** are pair 1 and pair 2 of the input files of sequence data in fasta/fastq format, and **options** are additional parameters that can be found in the the [PRINSEQ manual](http://prinseq.sourceforge.net/manual.html).
 
 The output format (`-out_format`) can be **1** (fasta only), **2** (fasta and qual), **3** (fastq), **4** (fastq and input fasta), and **5** (fastq, fasta and qual).
 
diff --git a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/scythe.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/scythe.md
index 0b18b6cd..e907b013 100644
--- a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/scythe.md
+++ b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/scythe.md
@@ -5,7 +5,7 @@ weight = "10"
 +++
 
 
-[Scythe] (https://github.com/vsbuffalo/scythe) is a 3' end adapter trimmer that uses a Naive Bayesian approach to classify contaminant substrings in sequence reads. 3' ends often include poor quality bases which need to be removed prior the quality-based trimming, mapping, assemblies, and further analysis.
+[Scythe](https://github.com/vsbuffalo/scythe) is a 3' end adapter trimmer that uses a Naive Bayesian approach to classify contaminant substrings in sequence reads. 3' ends often include poor quality bases which need to be removed prior the quality-based trimming, mapping, assemblies, and further analysis.
 
 
 The basic usage of Scythe is:
diff --git a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/sickle.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/sickle.md
index 6062f2cc..7550b450 100644
--- a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/sickle.md
+++ b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/sickle.md
@@ -5,7 +5,7 @@ weight = "10"
 +++
 
 
-[Sickle] (https://github.com/najoshi/sickle) is a windowed adaptive trimming tools for fastq files. Beside sliding window, Sickle uses quality and length thresholds to determine and trim low quality bases at both 3' end and 5' end of the reads.
+[Sickle](https://github.com/najoshi/sickle) is a windowed adaptive trimming tools for fastq files. Beside sliding window, Sickle uses quality and length thresholds to determine and trim low quality bases at both 3' end and 5' end of the reads.
 
 
 Information about the Sickle command-line options can be shown by typing:
diff --git a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/tagcleaner.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/tagcleaner.md
index 05e1f358..eeb9a893 100644
--- a/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/tagcleaner.md
+++ b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/tagcleaner.md
@@ -5,13 +5,13 @@ weight = "10"
 +++
 
 
-[TagCleaner] (http://tagcleaner.sourceforge.net/) is a tool used to automatically detect and remove tag sequences from genomic and metagenomic sequence data. These additional tag sequences can contain deletions or insertions due to sequencing limitations.
+[TagCleaner](http://tagcleaner.sourceforge.net/) is a tool used to automatically detect and remove tag sequences from genomic and metagenomic sequence data. These additional tag sequences can contain deletions or insertions due to sequencing limitations.
 
 The basic usage of TagCleaner is:
 {{< highlight bash >}}
 $ tagcleaner.pl [-fasta|-fastq] input_reads.[fasta|fastq] [-predict|-tag3|-tag5] [options]
 {{< /highlight >}}
-where **input_reads.[fasta|fastq]** is an input file of sequence data in fasta/fastq format, and **options** are additional parameters that can be found in the [TagCleaner manual] (http://tagcleaner.sourceforge.net/manual.html).
+where **input_reads.[fasta|fastq]** is an input file of sequence data in fasta/fastq format, and **options** are additional parameters that can be found in the [TagCleaner manual](http://tagcleaner.sourceforge.net/manual.html).
 
 
 Required parameter for TagCleaner is the tag sequence. If the tag sequence is unknown, then the **-predict** option will provide the predicted tag sequence to the user. If the tag sequence is known and is found at the 3' end of the read, then the option **-tag3 &lt;tag_sequence&gt;** is used. If the tag sequence is known and is found at the 5' end of the read, the the option **-tag5 &lt;tag_sequence&gt;** is used.
diff --git a/content/applications/app_specific/bioinformatics_tools/reference_based_assembly_tools/cufflinks.md b/content/applications/app_specific/bioinformatics_tools/reference_based_assembly_tools/cufflinks.md
index cfde2a04..711471e1 100644
--- a/content/applications/app_specific/bioinformatics_tools/reference_based_assembly_tools/cufflinks.md
+++ b/content/applications/app_specific/bioinformatics_tools/reference_based_assembly_tools/cufflinks.md
@@ -5,7 +5,7 @@ weight = "10"
 +++
 
 
-[Cufflinks] (http://cole-trapnell-lab.github.io/cufflinks/) is a transcript assembly program that includes a number of tools for analyzing RNA-Seq data. These tools assemble aligned RNA-Seq reads into transcripts, estimate their abundances, test for differential expression and regulation, and provide transcript quantification. Some of the tools part of Cufflinks can be run individually, while others are part of a larger workflow.
+[Cufflinks](http://cole-trapnell-lab.github.io/cufflinks/) is a transcript assembly program that includes a number of tools for analyzing RNA-Seq data. These tools assemble aligned RNA-Seq reads into transcripts, estimate their abundances, test for differential expression and regulation, and provide transcript quantification. Some of the tools part of Cufflinks can be run individually, while others are part of a larger workflow.
 
 The basic usage of Cufflinks is:
 {{< highlight bash >}}
@@ -13,7 +13,7 @@ $ cufflinks [options] input_alignments.[sam|bam]
 {{< /highlight >}}
 where `input_alignments.[sam|bam]` is sorted input file of RNA-Seq read alignments in SAM/BAM format. The RNA-Seq read mapper TopHat/TopHat2 produces output in this format and is recommended to be used with Cufflinks, although SAM/BAM alignments produced from any aligner are accepted. 
 
-More advanced Cufflinks options can be found in [the manual] (http://cole-trapnell-lab.github.io/cufflinks/manual/) or by typing:
+More advanced Cufflinks options can be found in [the manual](http://cole-trapnell-lab.github.io/cufflinks/manual/) or by typing:
 {{< highlight bash >}}
 $ cufflinks -h
 {{< /highlight >}}
diff --git a/content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/cd_hit.md b/content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/cd_hit.md
index b752007b..4fa711fc 100644
--- a/content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/cd_hit.md
+++ b/content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/cd_hit.md
@@ -17,7 +17,7 @@ Some of the most frequently used executables from the CD-HIT package are: CD-HIT
 - CD-HIT-LAP identifies overlapping reads
 
 Detailed overview of the whole CD-HIT package and executables can be found in the 
-[CD-HIT user's guide] (http://weizhongli-lab.org/lab-wiki/doku.php?id=cd-hit-user-guide).
+[CD-HIT user's guide](http://weizhongli-lab.org/lab-wiki/doku.php?id=cd-hit-user-guide).
 
 
 The basic usage of CD-HIT is:
diff --git a/content/applications/app_specific/fortran_c_on_hcc.md b/content/applications/app_specific/fortran_c_on_hcc.md
index 3c41ebfc..877bd5d0 100644
--- a/content/applications/app_specific/fortran_c_on_hcc.md
+++ b/content/applications/app_specific/fortran_c_on_hcc.md
@@ -102,7 +102,7 @@ Collection, `gcc`, for demonstration. Other available compilers such as
 line `module avail`.  Before compiling the code, make sure there is no 
 dependency on any numerical library in the code. If invoking a numerical 
 library is necessary, contact a HCC specialist 
-({{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)) to
+({{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu)) to
 discuss implementation options.
 
 {{< highlight bash >}}
diff --git a/content/applications/app_specific/running_gaussian_at_hcc.md b/content/applications/app_specific/running_gaussian_at_hcc.md
index b3d5bf65..45aa43e6 100644
--- a/content/applications/app_specific/running_gaussian_at_hcc.md
+++ b/content/applications/app_specific/running_gaussian_at_hcc.md
@@ -19,7 +19,7 @@ Kearney are not allowed to run **g09** program at HCC without purchase
 of a **g09** license.
 
 For access, contact us at
- {{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
+ {{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu)
 and include your HCC username. After your account has been added to the
 group "*gauss*", here are four simple steps to run Gaussian 09 on Crane:
 
diff --git a/content/connecting/mobaxterm.md b/content/connecting/mobaxterm.md
index 0bed8e41..c774b705 100644
--- a/content/connecting/mobaxterm.md
+++ b/content/connecting/mobaxterm.md
@@ -19,7 +19,7 @@ To connect to HCC resources using MobaXterm, open the application and select the
 Select SSH as the session type. Enter the cluster you are connecting to, in the example, `crane.unl.edu`, is used.  Check `Specify username`  and enter your HCC username in the the box. Note that &lt;username&gt;
 should be replaced by your HCC account username. If you do not have a
 HCC account, please contact a HCC specialist
-({{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu))
+({{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu))
 or go to https://hcc.unl.edu/newusers.
 To use the **Rhino** cluster, replace crane.unl.edu with with rhino.unl.edu.
 {{< figure src="/images/moba/session.png" height="450" >}}
diff --git a/content/connecting/putty.md b/content/connecting/putty.md
index 8f7b52df..6bcfcd7e 100644
--- a/content/connecting/putty.md
+++ b/content/connecting/putty.md
@@ -53,7 +53,7 @@ you want to connect to the `Rhino` cluster.
 
 3.  On the third screen, enter your HCC account **username**. If you do
     not have a HCC account, please contact an HCC specialist
-    ({{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu))
+    ({{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu))
     or go to http://hcc.unl.edu/newusers.
 
     {{% notice info %}}Replace `cbohn` with your username.{{% /notice %}}
@@ -85,7 +85,7 @@ you want to connect to the `Rhino` cluster.
 8.  Check your smart phone for Duo login request. Press "Approve" if you
     can verify the request. If you find any Duo login request that is
     not initiated by yourself, deny it and report this incident
-    immediately to {{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu).
+    immediately to {{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu).
 
     {{< figure src="/images/8127263.png" height="450" >}}      
       
diff --git a/content/connecting/terminal.md b/content/connecting/terminal.md
index 785ac13b..ba30986d 100644
--- a/content/connecting/terminal.md
+++ b/content/connecting/terminal.md
@@ -7,7 +7,7 @@ weight = "10"
 ##### Use of Duo two-factor authentication is **required** to access HCC resources.
 ##### Please see [Setting up and Using Duo]({{< relref "setting_up_and_using_duo" >}}).
 ---
-- [Starting Terminal] (#starting-terminal)
+- [Starting Terminal](#starting-terminal)
   - [MacOS](#macos)
   - [Linux](#linux)
   - [Windows](#windows)
diff --git a/content/contact_us/_index.md b/content/contact_us/_index.md
index 2cf841eb..62a87db6 100644
--- a/content/contact_us/_index.md
+++ b/content/contact_us/_index.md
@@ -5,7 +5,7 @@ weight = "100"
 +++
 
 If you have questions, please contact us at
-{{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
+{{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu)
 or stop by one of our locations.
 
 |                  Lincoln                        |                     Omaha          |
diff --git a/content/good_hcc_practices/_index.md b/content/good_hcc_practices/_index.md
index cd1c03dc..c7b07cad 100644
--- a/content/good_hcc_practices/_index.md
+++ b/content/good_hcc_practices/_index.md
@@ -73,4 +73,4 @@ and [mem_report]({{< relref "monitoring_jobs" >}}). While these tools can not pr
 useful information the researcher can use the next time that particular application is run.
 
 We strongly recommend you to read and follow this guidance. If you have any concerns about your workflows or need any 
-assistance, please contact HCC Support at {{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu).
+assistance, please contact HCC Support at {{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu).
diff --git a/content/handling_data/data_storage/linux_file_permissions.md b/content/handling_data/data_storage/linux_file_permissions.md
index 9a3d7099..4739cbb2 100644
--- a/content/handling_data/data_storage/linux_file_permissions.md
+++ b/content/handling_data/data_storage/linux_file_permissions.md
@@ -4,9 +4,9 @@ description = "How to view and change file permissions with Linux commands"
 weight = 20
 +++
 
-- [Opening a Terminal Window] (#opening-a-terminal-window)
-- [Listing File Permissions] (#listing-file-permissions)
-- [Changing File Permissions] (#changing-file-permissions)
+- [Opening a Terminal Window](#opening-a-terminal-window)
+- [Listing File Permissions](#listing-file-permissions)
+- [Changing File Permissions](#changing-file-permissions)
 
 ## Opening a Terminal Window
 -----------------------
diff --git a/content/intro/_index.md b/content/intro/_index.md
index bab84a39..0ab86ab0 100644
--- a/content/intro/_index.md
+++ b/content/intro/_index.md
@@ -21,7 +21,7 @@ such as HTCondor or SLURM.
 
 #### Login Node:
 Users will automatically land on the login node when they log in to the clusters. 
-You will [submit jobs] ({{< ref "/submitting_jobs" >}}) using one of the schedulers 
+You will [submit jobs]({{< ref "/submitting_jobs" >}}) using one of the schedulers 
 and pull the results of your jobs. Any jobs running on the login node directly will be 
 stopped so others can use the login node to submit jobs.
 
diff --git a/content/submitting_jobs/partitions/_index.md b/content/submitting_jobs/partitions/_index.md
index 88466f8e..5ab7adae 100644
--- a/content/submitting_jobs/partitions/_index.md
+++ b/content/submitting_jobs/partitions/_index.md
@@ -59,7 +59,7 @@ Partitions marked as owned by a group means only specific groups are
 allowed to submit jobs to that partition.  Groups are manually added to
 the list allowed to submit jobs to the partition.  If you are unable to
 submit jobs to a partition, and you feel that you should be, please
-contact {{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu).
+contact {{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu).
 
 ### Guest Partition
 
-- 
GitLab