diff --git a/content/events/2012/_index.md b/content/Events/2012/_index.md
similarity index 100%
rename from content/events/2012/_index.md
rename to content/Events/2012/_index.md
diff --git a/content/events/2012/nebraska_supercomputing_symposium_12.md b/content/Events/2012/nebraska_supercomputing_symposium_12.md
similarity index 100%
rename from content/events/2012/nebraska_supercomputing_symposium_12.md
rename to content/Events/2012/nebraska_supercomputing_symposium_12.md
diff --git a/content/events/2012/supercomputing_mini_workshop_2012.md b/content/Events/2012/supercomputing_mini_workshop_2012.md
similarity index 100%
rename from content/events/2012/supercomputing_mini_workshop_2012.md
rename to content/Events/2012/supercomputing_mini_workshop_2012.md
diff --git a/content/Events/2013/HCC_Supercomputing_Symposium_2013.md b/content/Events/2013/HCC_Supercomputing_Symposium_2013.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/content/events/2013/_index.md b/content/Events/2013/_index.md
similarity index 100%
rename from content/events/2013/_index.md
rename to content/Events/2013/_index.md
diff --git a/content/Events/2013/hcc_condor_workshop_2013.md b/content/Events/2013/hcc_condor_workshop_2013.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/content/Events/2013/hcc_matlab_workshop_2013.md b/content/Events/2013/hcc_matlab_workshop_2013.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/content/Events/2013/hcc_mpi_workshop_2013.md b/content/Events/2013/hcc_mpi_workshop_2013.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/content/events/2013/june_2013_osg_workshop.md b/content/Events/2013/june_2013_osg_workshop.md
similarity index 100%
rename from content/events/2013/june_2013_osg_workshop.md
rename to content/Events/2013/june_2013_osg_workshop.md
diff --git a/content/events/2013/supercomputing_mini_workshop_february_27_2013.md b/content/Events/2013/supercomputing_mini_workshop_february_27_2013.md
similarity index 100%
rename from content/events/2013/supercomputing_mini_workshop_february_27_2013.md
rename to content/Events/2013/supercomputing_mini_workshop_february_27_2013.md
diff --git a/content/events/2014/_index.md b/content/Events/2014/_index.md
similarity index 100%
rename from content/events/2014/_index.md
rename to content/Events/2014/_index.md
diff --git a/content/events/2014/august_2014_uno_hcc_workshop.md b/content/Events/2014/august_2014_uno_hcc_workshop.md
similarity index 100%
rename from content/events/2014/august_2014_uno_hcc_workshop.md
rename to content/Events/2014/august_2014_uno_hcc_workshop.md
diff --git a/content/events/2014/july_2014_bioinformatics_workshop.md b/content/Events/2014/july_2014_bioinformatics_workshop.md
similarity index 100%
rename from content/events/2014/july_2014_bioinformatics_workshop.md
rename to content/Events/2014/july_2014_bioinformatics_workshop.md
diff --git a/content/events/2014/june_2014_bioinformatics_workshop.md b/content/Events/2014/june_2014_bioinformatics_workshop.md
similarity index 100%
rename from content/events/2014/june_2014_bioinformatics_workshop.md
rename to content/Events/2014/june_2014_bioinformatics_workshop.md
diff --git a/content/events/2014/october_2014_hcc_workshop_college_of_engineering.md b/content/Events/2014/october_2014_hcc_workshop_college_of_engineering.md
similarity index 100%
rename from content/events/2014/october_2014_hcc_workshop_college_of_engineering.md
rename to content/Events/2014/october_2014_hcc_workshop_college_of_engineering.md
diff --git a/content/events/2014/september_2014_hcc_workshop_earth_and_atmospheric_sciences.md b/content/Events/2014/september_2014_hcc_workshop_earth_and_atmospheric_sciences.md
similarity index 100%
rename from content/events/2014/september_2014_hcc_workshop_earth_and_atmospheric_sciences.md
rename to content/Events/2014/september_2014_hcc_workshop_earth_and_atmospheric_sciences.md
diff --git a/content/events/2015/_index.md b/content/Events/2015/_index.md
similarity index 100%
rename from content/events/2015/_index.md
rename to content/Events/2015/_index.md
diff --git a/content/events/2015/hcc_fall_kickstart_2015.md b/content/Events/2015/hcc_fall_kickstart_2015.md
similarity index 100%
rename from content/events/2015/hcc_fall_kickstart_2015.md
rename to content/Events/2015/hcc_fall_kickstart_2015.md
diff --git a/content/events/2016/_index.md b/content/Events/2016/_index.md
similarity index 100%
rename from content/events/2016/_index.md
rename to content/Events/2016/_index.md
diff --git a/content/events/2016/hcc_fall_kickstart_2016.md b/content/Events/2016/hcc_fall_kickstart_2016.md
similarity index 100%
rename from content/events/2016/hcc_fall_kickstart_2016.md
rename to content/Events/2016/hcc_fall_kickstart_2016.md
diff --git a/content/Events/2016/hcc_qiime_workshop_2016.md b/content/Events/2016/hcc_qiime_workshop_2016.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/content/events/2016/hcc_spring_workshop_uno.md b/content/Events/2016/hcc_spring_workshop_uno.md
similarity index 100%
rename from content/events/2016/hcc_spring_workshop_uno.md
rename to content/Events/2016/hcc_spring_workshop_uno.md
diff --git a/content/events/2016/instructions_for_52016_allinea_workshop.md b/content/Events/2016/instructions_for_52016_allinea_workshop.md
similarity index 100%
rename from content/events/2016/instructions_for_52016_allinea_workshop.md
rename to content/Events/2016/instructions_for_52016_allinea_workshop.md
diff --git a/content/events/2016/unl_numerical_analysis_class_august_5th_2016.md b/content/Events/2016/unl_numerical_analysis_class_august_5th_2016.md
similarity index 100%
rename from content/events/2016/unl_numerical_analysis_class_august_5th_2016.md
rename to content/Events/2016/unl_numerical_analysis_class_august_5th_2016.md
diff --git a/content/events/2017/_index.md b/content/Events/2017/_index.md
similarity index 100%
rename from content/events/2017/_index.md
rename to content/Events/2017/_index.md
diff --git a/content/events/2017/hcc_users_group.md b/content/Events/2017/hcc_users_group.md
similarity index 100%
rename from content/events/2017/hcc_users_group.md
rename to content/Events/2017/hcc_users_group.md
diff --git a/content/events/2017/hgrlab_anvil_presentation.md b/content/Events/2017/hgrlab_anvil_presentation.md
similarity index 100%
rename from content/events/2017/hgrlab_anvil_presentation.md
rename to content/Events/2017/hgrlab_anvil_presentation.md
diff --git a/content/events/2017/june_workshop_series_2017.md b/content/Events/2017/june_workshop_series_2017.md
similarity index 100%
rename from content/events/2017/june_workshop_series_2017.md
rename to content/Events/2017/june_workshop_series_2017.md
diff --git a/content/Events/2017/unk_linear_algebra_feb_28th_2017.md b/content/Events/2017/unk_linear_algebra_feb_28th_2017.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/content/Events/2017/unl_r_for_biologists_class_march_8_2017.md b/content/Events/2017/unl_r_for_biologists_class_march_8_2017.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/content/events/2017/unl_stat802_january_23rd_2017.md b/content/Events/2017/unl_stat802_january_23rd_2017.md
similarity index 100%
rename from content/events/2017/unl_stat802_january_23rd_2017.md
rename to content/Events/2017/unl_stat802_january_23rd_2017.md
diff --git a/content/events/2017/unl_stat_802_fall_2017.md b/content/Events/2017/unl_stat_802_fall_2017.md
similarity index 100%
rename from content/events/2017/unl_stat_802_fall_2017.md
rename to content/Events/2017/unl_stat_802_fall_2017.md
diff --git a/content/events/2017/unl_stat_850_fall_2017.md b/content/Events/2017/unl_stat_850_fall_2017.md
similarity index 100%
rename from content/events/2017/unl_stat_850_fall_2017.md
rename to content/Events/2017/unl_stat_850_fall_2017.md
diff --git a/content/events/2017/unl_stat_950_fall_2017.md b/content/Events/2017/unl_stat_950_fall_2017.md
similarity index 100%
rename from content/events/2017/unl_stat_950_fall_2017.md
rename to content/Events/2017/unl_stat_950_fall_2017.md
diff --git a/content/events/2017/unl_stat_alpha_seminar_fall_2017.md b/content/Events/2017/unl_stat_alpha_seminar_fall_2017.md
similarity index 100%
rename from content/events/2017/unl_stat_alpha_seminar_fall_2017.md
rename to content/Events/2017/unl_stat_alpha_seminar_fall_2017.md
diff --git a/content/events/2018/_index.md b/content/Events/2018/_index.md
similarity index 100%
rename from content/events/2018/_index.md
rename to content/Events/2018/_index.md
diff --git a/content/events/2018/unl_stat_850_fall_2018.md b/content/Events/2018/unl_stat_850_fall_2018.md
similarity index 100%
rename from content/events/2018/unl_stat_850_fall_2018.md
rename to content/Events/2018/unl_stat_850_fall_2018.md
diff --git a/content/events/_index.md b/content/Events/_index.md
similarity index 86%
rename from content/events/_index.md
rename to content/Events/_index.md
index ab0b059526d54bb96c3f0a47848bdd7b64057d74..16ad5fd8fc2fed7fcfe424153741d08dbd8a5865 100644
--- a/content/events/_index.md
+++ b/content/Events/_index.md
@@ -1,7 +1,8 @@
 +++
 title = "Events"
 description = "Historical listing of various HCC events."
-weight = "30"
+weight = "70"
+hidden = true
 +++
 
 Historical listing of HCC Events
diff --git a/content/FAQ/_index.md b/content/FAQ/_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/content/osg/_index.md b/content/OSG/_index.md
similarity index 98%
rename from content/osg/_index.md
rename to content/OSG/_index.md
index 034162b2d822a44bfe370ae3fc52b5ac79229e31..4dcce7d913f66f75435f717d6039e3b28d883fa7 100644
--- a/content/osg/_index.md
+++ b/content/OSG/_index.md
@@ -1,7 +1,7 @@
 +++
 title = "The Open Science Grid"
 description = "How to utilize the Open Science Grid (OSG)."
-weight = "40"
+weight = "80"
 +++
 
 If you find that you are not getting access to the volume of computing
diff --git a/content/osg/a_simple_example_of_submitting_an_htcondor_job.md b/content/OSG/a_simple_example_of_submitting_an_htcondor_job.md
similarity index 100%
rename from content/osg/a_simple_example_of_submitting_an_htcondor_job.md
rename to content/OSG/a_simple_example_of_submitting_an_htcondor_job.md
diff --git a/content/osg/characteristics_of_an_osg_friendly_job.md b/content/OSG/characteristics_of_an_osg_friendly_job.md
similarity index 100%
rename from content/osg/characteristics_of_an_osg_friendly_job.md
rename to content/OSG/characteristics_of_an_osg_friendly_job.md
diff --git a/content/osg/how_to_submit_an_osg_job_with_htcondor.md b/content/OSG/how_to_submit_an_osg_job_with_htcondor.md
similarity index 100%
rename from content/osg/how_to_submit_an_osg_job_with_htcondor.md
rename to content/OSG/how_to_submit_an_osg_job_with_htcondor.md
diff --git a/content/osg/using_distributed_environment_modules_on_osg.md b/content/OSG/using_distributed_environment_modules_on_osg.md
similarity index 100%
rename from content/osg/using_distributed_environment_modules_on_osg.md
rename to content/OSG/using_distributed_environment_modules_on_osg.md
diff --git a/content/_index.md b/content/_index.md
index bc6b11c4317d565a8da49beda90a1ffe3b1a2086..29338843aaa9e52eeb6830bda7d8b86a0ece9513 100644
--- a/content/_index.md
+++ b/content/_index.md
@@ -21,8 +21,6 @@ users almost immediate access, but the job is subject to preemption.
 
 #### [New Users Sign Up](http://hcc.unl.edu/new-user-request)
 
-#### [Quick Start Guides](/quickstarts)
-
 Which Cluster to Use?
 ---------------------
 
@@ -59,7 +57,7 @@ Duo Security
 
 Duo two-factor authentication is **required** for access to HCC
 resources. Registration and usage of Duo security can be found in this
-section: [Setting up and using Duo]({{< relref "setting_up_and_using_duo">}})
+section: [Setting up and using Duo]({{< relref "/accounts/setting_up_and_using_duo">}})
 
 **Important Notes**
 
diff --git a/content/accounts/_index.md b/content/accounts/_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..d443ac6e985b83160858784d3acedeb1fbfe80b1
--- /dev/null
+++ b/content/accounts/_index.md
@@ -0,0 +1,17 @@
++++
+title = "Creating an Account"
+weight = "20"
++++
+
+Anyone affiliated with the University of Nebraska system can request an account
+and use HCC shared resources for free.
+
+All HCC accounts are associated with a faculty-owned HCC group. Individuals interested
+in requesting an account under an established group will need to complete a [new user request](http://hcc.unl.edu/new-user-request/).
+
+To establish a new group, please complete a [new group request](https://hcc.unl.edu/new-group-request).
+
+Additional guides on basic account operations
+--------------------------------------
+
+{{% children description="true" %}}
diff --git a/content/quickstarts/connecting/how_to_change_your_password.md b/content/accounts/how_to_change_your_password.md
similarity index 100%
rename from content/quickstarts/connecting/how_to_change_your_password.md
rename to content/accounts/how_to_change_your_password.md
diff --git a/content/quickstarts/setting_up_and_using_duo.md b/content/accounts/setting_up_and_using_duo.md
similarity index 100%
rename from content/quickstarts/setting_up_and_using_duo.md
rename to content/accounts/setting_up_and_using_duo.md
diff --git a/content/anvil/_index.md b/content/anvil/_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/content/guides/anvil/adding_ssh_key_pairs.md b/content/anvil/adding_ssh_key_pairs.md
similarity index 100%
rename from content/guides/anvil/adding_ssh_key_pairs.md
rename to content/anvil/adding_ssh_key_pairs.md
diff --git a/content/guides/anvil/anvil_instance_types.md b/content/anvil/anvil_instance_types.md
similarity index 100%
rename from content/guides/anvil/anvil_instance_types.md
rename to content/anvil/anvil_instance_types.md
diff --git a/content/guides/anvil/available_images.md b/content/anvil/available_images.md
similarity index 100%
rename from content/guides/anvil/available_images.md
rename to content/anvil/available_images.md
diff --git a/content/guides/anvil/connecting_to_linux_instances_from_mac.md b/content/anvil/connecting_to_linux_instances_from_mac.md
similarity index 100%
rename from content/guides/anvil/connecting_to_linux_instances_from_mac.md
rename to content/anvil/connecting_to_linux_instances_from_mac.md
diff --git a/content/guides/anvil/connecting_to_linux_instances_from_windows.md b/content/anvil/connecting_to_linux_instances_from_windows.md
similarity index 100%
rename from content/guides/anvil/connecting_to_linux_instances_from_windows.md
rename to content/anvil/connecting_to_linux_instances_from_windows.md
diff --git a/content/guides/anvil/connecting_to_linux_instances_using_x2go.md b/content/anvil/connecting_to_linux_instances_using_x2go.md
similarity index 100%
rename from content/guides/anvil/connecting_to_linux_instances_using_x2go.md
rename to content/anvil/connecting_to_linux_instances_using_x2go.md
diff --git a/content/guides/anvil/connecting_to_the_anvil_vpn.md b/content/anvil/connecting_to_the_anvil_vpn.md
similarity index 100%
rename from content/guides/anvil/connecting_to_the_anvil_vpn.md
rename to content/anvil/connecting_to_the_anvil_vpn.md
diff --git a/content/guides/anvil/connecting_to_windows_instances.md b/content/anvil/connecting_to_windows_instances.md
similarity index 100%
rename from content/guides/anvil/connecting_to_windows_instances.md
rename to content/anvil/connecting_to_windows_instances.md
diff --git a/content/guides/anvil/creating_an_instance.md b/content/anvil/creating_an_instance.md
similarity index 100%
rename from content/guides/anvil/creating_an_instance.md
rename to content/anvil/creating_an_instance.md
diff --git a/content/guides/anvil/creating_and_attaching_a_volume.md b/content/anvil/creating_and_attaching_a_volume.md
similarity index 100%
rename from content/guides/anvil/creating_and_attaching_a_volume.md
rename to content/anvil/creating_and_attaching_a_volume.md
diff --git a/content/guides/anvil/creating_ssh_key_pairs_on_mac.md b/content/anvil/creating_ssh_key_pairs_on_mac.md
similarity index 100%
rename from content/guides/anvil/creating_ssh_key_pairs_on_mac.md
rename to content/anvil/creating_ssh_key_pairs_on_mac.md
diff --git a/content/guides/anvil/creating_ssh_key_pairs_on_windows.md b/content/anvil/creating_ssh_key_pairs_on_windows.md
similarity index 100%
rename from content/guides/anvil/creating_ssh_key_pairs_on_windows.md
rename to content/anvil/creating_ssh_key_pairs_on_windows.md
diff --git a/content/guides/anvil/formatting_and_mounting_a_volume_in_linux.md b/content/anvil/formatting_and_mounting_a_volume_in_linux.md
similarity index 100%
rename from content/guides/anvil/formatting_and_mounting_a_volume_in_linux.md
rename to content/anvil/formatting_and_mounting_a_volume_in_linux.md
diff --git a/content/guides/anvil/formatting_and_mounting_a_volume_in_windows.md b/content/anvil/formatting_and_mounting_a_volume_in_windows.md
similarity index 100%
rename from content/guides/anvil/formatting_and_mounting_a_volume_in_windows.md
rename to content/anvil/formatting_and_mounting_a_volume_in_windows.md
diff --git a/content/guides/anvil/resizing_an_instance.md b/content/anvil/resizing_an_instance.md
similarity index 100%
rename from content/guides/anvil/resizing_an_instance.md
rename to content/anvil/resizing_an_instance.md
diff --git a/content/guides/anvil/what_are_the_per_group_resource_limits.md b/content/anvil/what_are_the_per_group_resource_limits.md
similarity index 100%
rename from content/guides/anvil/what_are_the_per_group_resource_limits.md
rename to content/anvil/what_are_the_per_group_resource_limits.md
diff --git a/content/applications/_index.md b/content/applications/_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..ecaa1702f4266fd0e4a6f3225e04b13bc2b24b17
--- /dev/null
+++ b/content/applications/_index.md
@@ -0,0 +1,9 @@
++++
+title = "Running Applications"
+weight = "40"
++++
+
+In-depth guides for using applications on HCC resources
+--------------------------------------
+
+{{% children description="true" %}}
diff --git a/content/applications/app_specific/Jupyter.md b/content/applications/app_specific/Jupyter.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/content/applications/app_specific/_index.md b/content/applications/app_specific/_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..0e4b37a99c69a025b8a48571403e4b1d9364dc6a
--- /dev/null
+++ b/content/applications/app_specific/_index.md
@@ -0,0 +1,9 @@
++++
+title = "Application Specific Guides"
+weight = "40"
++++
+
+In-depth guides for running applications on HCC resources
+--------------------------------------
+
+{{% children description="true" %}}
diff --git a/content/guides/running_applications/allinea_profiling_and_debugging/_index.md b/content/applications/app_specific/allinea_profiling_and_debugging/_index.md
similarity index 100%
rename from content/guides/running_applications/allinea_profiling_and_debugging/_index.md
rename to content/applications/app_specific/allinea_profiling_and_debugging/_index.md
diff --git a/content/guides/running_applications/allinea_profiling_and_debugging/allinea_performance_reports/_index.md b/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/_index.md
similarity index 100%
rename from content/guides/running_applications/allinea_profiling_and_debugging/allinea_performance_reports/_index.md
rename to content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/_index.md
diff --git a/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/blast_with_allinea_performance_reports.md b/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/blast_with_allinea_performance_reports.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/content/guides/running_applications/allinea_profiling_and_debugging/allinea_performance_reports/lammps_with_allinea_performance_reports.md b/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/lammps_with_allinea_performance_reports.md
similarity index 100%
rename from content/guides/running_applications/allinea_profiling_and_debugging/allinea_performance_reports/lammps_with_allinea_performance_reports.md
rename to content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/lammps_with_allinea_performance_reports.md
diff --git a/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/ray_with_allinea_performance_reports.md b/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/ray_with_allinea_performance_reports.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/content/guides/running_applications/allinea_profiling_and_debugging/using_allinea_forge_via_reverse_connect.md b/content/applications/app_specific/allinea_profiling_and_debugging/using_allinea_forge_via_reverse_connect.md
similarity index 100%
rename from content/guides/running_applications/allinea_profiling_and_debugging/using_allinea_forge_via_reverse_connect.md
rename to content/applications/app_specific/allinea_profiling_and_debugging/using_allinea_forge_via_reverse_connect.md
diff --git a/content/guides/running_applications/bioinformatics_tools/_index.md b/content/applications/app_specific/bioinformatics_tools/_index.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/_index.md
rename to content/applications/app_specific/bioinformatics_tools/_index.md
diff --git a/content/guides/running_applications/bioinformatics_tools/alignment_tools/_index.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/_index.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/alignment_tools/_index.md
rename to content/applications/app_specific/bioinformatics_tools/alignment_tools/_index.md
diff --git a/content/guides/running_applications/bioinformatics_tools/alignment_tools/blast/_index.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/_index.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/alignment_tools/blast/_index.md
rename to content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/_index.md
diff --git a/content/guides/running_applications/bioinformatics_tools/alignment_tools/blast/create_local_blast_database.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/create_local_blast_database.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/alignment_tools/blast/create_local_blast_database.md
rename to content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/create_local_blast_database.md
diff --git a/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/running_blast_alignment.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/running_blast_alignment.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/content/guides/running_applications/bioinformatics_tools/alignment_tools/blat.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/blat.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/alignment_tools/blat.md
rename to content/applications/app_specific/bioinformatics_tools/alignment_tools/blat.md
diff --git a/content/guides/running_applications/bioinformatics_tools/alignment_tools/bowtie.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/alignment_tools/bowtie.md
rename to content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie.md
diff --git a/content/guides/running_applications/bioinformatics_tools/alignment_tools/bowtie2.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie2.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/alignment_tools/bowtie2.md
rename to content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie2.md
diff --git a/content/guides/running_applications/bioinformatics_tools/alignment_tools/bwa/_index.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/bwa/_index.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/alignment_tools/bwa/_index.md
rename to content/applications/app_specific/bioinformatics_tools/alignment_tools/bwa/_index.md
diff --git a/content/guides/running_applications/bioinformatics_tools/alignment_tools/bwa/running_bwa_commands.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/bwa/running_bwa_commands.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/alignment_tools/bwa/running_bwa_commands.md
rename to content/applications/app_specific/bioinformatics_tools/alignment_tools/bwa/running_bwa_commands.md
diff --git a/content/guides/running_applications/bioinformatics_tools/alignment_tools/clustal_omega.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/clustal_omega.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/alignment_tools/clustal_omega.md
rename to content/applications/app_specific/bioinformatics_tools/alignment_tools/clustal_omega.md
diff --git a/content/guides/running_applications/bioinformatics_tools/alignment_tools/tophat_tophat2.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/tophat_tophat2.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/alignment_tools/tophat_tophat2.md
rename to content/applications/app_specific/bioinformatics_tools/alignment_tools/tophat_tophat2.md
diff --git a/content/applications/app_specific/bioinformatics_tools/biodata_module.md b/content/applications/app_specific/bioinformatics_tools/biodata_module.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/content/guides/running_applications/bioinformatics_tools/data_manipulation_tools/_index.md b/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/_index.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/data_manipulation_tools/_index.md
rename to content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/_index.md
diff --git a/content/guides/running_applications/bioinformatics_tools/data_manipulation_tools/bamtools/_index.md b/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/bamtools/_index.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/data_manipulation_tools/bamtools/_index.md
rename to content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/bamtools/_index.md
diff --git a/content/guides/running_applications/bioinformatics_tools/data_manipulation_tools/bamtools/running_bamtools_commands.md b/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/bamtools/running_bamtools_commands.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/data_manipulation_tools/bamtools/running_bamtools_commands.md
rename to content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/bamtools/running_bamtools_commands.md
diff --git a/content/guides/running_applications/bioinformatics_tools/data_manipulation_tools/samtools/_index.md b/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/samtools/_index.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/data_manipulation_tools/samtools/_index.md
rename to content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/samtools/_index.md
diff --git a/content/guides/running_applications/bioinformatics_tools/data_manipulation_tools/samtools/running_samtools_commands.md b/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/samtools/running_samtools_commands.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/data_manipulation_tools/samtools/running_samtools_commands.md
rename to content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/samtools/running_samtools_commands.md
diff --git a/content/guides/running_applications/bioinformatics_tools/data_manipulation_tools/sratoolkit.md b/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/sratoolkit.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/data_manipulation_tools/sratoolkit.md
rename to content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/sratoolkit.md
diff --git a/content/guides/running_applications/bioinformatics_tools/de_novo_assembly_tools/_index.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/_index.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/de_novo_assembly_tools/_index.md
rename to content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/_index.md
diff --git a/content/guides/running_applications/bioinformatics_tools/de_novo_assembly_tools/oases.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/oases.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/de_novo_assembly_tools/oases.md
rename to content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/oases.md
diff --git a/content/guides/running_applications/bioinformatics_tools/de_novo_assembly_tools/ray.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/ray.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/de_novo_assembly_tools/ray.md
rename to content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/ray.md
diff --git a/content/guides/running_applications/bioinformatics_tools/de_novo_assembly_tools/soapdenovo2.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/soapdenovo2.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/de_novo_assembly_tools/soapdenovo2.md
rename to content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/soapdenovo2.md
diff --git a/content/guides/running_applications/bioinformatics_tools/de_novo_assembly_tools/trinity/_index.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/trinity/_index.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/de_novo_assembly_tools/trinity/_index.md
rename to content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/trinity/_index.md
diff --git a/content/guides/running_applications/bioinformatics_tools/de_novo_assembly_tools/trinity/running_trinity_in_multiple_steps.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/trinity/running_trinity_in_multiple_steps.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/de_novo_assembly_tools/trinity/running_trinity_in_multiple_steps.md
rename to content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/trinity/running_trinity_in_multiple_steps.md
diff --git a/content/guides/running_applications/bioinformatics_tools/de_novo_assembly_tools/velvet/_index.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/_index.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/de_novo_assembly_tools/velvet/_index.md
rename to content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/_index.md
diff --git a/content/guides/running_applications/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_paired_end_data.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_paired_end_data.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_paired_end_data.md
rename to content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_paired_end_data.md
diff --git a/content/guides/running_applications/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_single_end_and_paired_end_data.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_single_end_and_paired_end_data.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_single_end_and_paired_end_data.md
rename to content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_single_end_and_paired_end_data.md
diff --git a/content/guides/running_applications/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_single_end_data.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_single_end_data.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_single_end_data.md
rename to content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_single_end_data.md
diff --git a/content/guides/running_applications/bioinformatics_tools/downloading_sra_data_from_ncbi.md b/content/applications/app_specific/bioinformatics_tools/downloading_sra_data_from_ncbi.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/downloading_sra_data_from_ncbi.md
rename to content/applications/app_specific/bioinformatics_tools/downloading_sra_data_from_ncbi.md
diff --git a/content/guides/running_applications/bioinformatics_tools/pre_processing_tools/_index.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/_index.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/pre_processing_tools/_index.md
rename to content/applications/app_specific/bioinformatics_tools/pre_processing_tools/_index.md
diff --git a/content/guides/running_applications/bioinformatics_tools/pre_processing_tools/cutadapt.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/cutadapt.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/pre_processing_tools/cutadapt.md
rename to content/applications/app_specific/bioinformatics_tools/pre_processing_tools/cutadapt.md
diff --git a/content/guides/running_applications/bioinformatics_tools/pre_processing_tools/prinseq.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/prinseq.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/pre_processing_tools/prinseq.md
rename to content/applications/app_specific/bioinformatics_tools/pre_processing_tools/prinseq.md
diff --git a/content/guides/running_applications/bioinformatics_tools/pre_processing_tools/scythe.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/scythe.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/pre_processing_tools/scythe.md
rename to content/applications/app_specific/bioinformatics_tools/pre_processing_tools/scythe.md
diff --git a/content/guides/running_applications/bioinformatics_tools/pre_processing_tools/sickle.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/sickle.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/pre_processing_tools/sickle.md
rename to content/applications/app_specific/bioinformatics_tools/pre_processing_tools/sickle.md
diff --git a/content/guides/running_applications/bioinformatics_tools/pre_processing_tools/tagcleaner.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/tagcleaner.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/pre_processing_tools/tagcleaner.md
rename to content/applications/app_specific/bioinformatics_tools/pre_processing_tools/tagcleaner.md
diff --git a/content/guides/running_applications/bioinformatics_tools/qiime.md b/content/applications/app_specific/bioinformatics_tools/qiime.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/qiime.md
rename to content/applications/app_specific/bioinformatics_tools/qiime.md
diff --git a/content/guides/running_applications/bioinformatics_tools/reference_based_assembly_tools/_index.md b/content/applications/app_specific/bioinformatics_tools/reference_based_assembly_tools/_index.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/reference_based_assembly_tools/_index.md
rename to content/applications/app_specific/bioinformatics_tools/reference_based_assembly_tools/_index.md
diff --git a/content/guides/running_applications/bioinformatics_tools/reference_based_assembly_tools/cufflinks.md b/content/applications/app_specific/bioinformatics_tools/reference_based_assembly_tools/cufflinks.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/reference_based_assembly_tools/cufflinks.md
rename to content/applications/app_specific/bioinformatics_tools/reference_based_assembly_tools/cufflinks.md
diff --git a/content/guides/running_applications/bioinformatics_tools/removing_detecting_redundant_sequences/_index.md b/content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/_index.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/removing_detecting_redundant_sequences/_index.md
rename to content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/_index.md
diff --git a/content/guides/running_applications/bioinformatics_tools/removing_detecting_redundant_sequences/cap3.md b/content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/cap3.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/removing_detecting_redundant_sequences/cap3.md
rename to content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/cap3.md
diff --git a/content/guides/running_applications/bioinformatics_tools/removing_detecting_redundant_sequences/cd_hit.md b/content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/cd_hit.md
similarity index 100%
rename from content/guides/running_applications/bioinformatics_tools/removing_detecting_redundant_sequences/cd_hit.md
rename to content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/cd_hit.md
diff --git a/content/applications/app_specific/dmtcp_checkpointing.md b/content/applications/app_specific/dmtcp_checkpointing.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/content/applications/app_specific/fortran_c_on_hcc.md b/content/applications/app_specific/fortran_c_on_hcc.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/content/applications/app_specific/mpi_jobs_on_hcc.md b/content/applications/app_specific/mpi_jobs_on_hcc.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/content/guides/running_applications/running_gaussian_at_hcc.md b/content/applications/app_specific/running_gaussian_at_hcc.md
similarity index 100%
rename from content/guides/running_applications/running_gaussian_at_hcc.md
rename to content/applications/app_specific/running_gaussian_at_hcc.md
diff --git a/content/guides/running_applications/running_matlab_parallel_server.md b/content/applications/app_specific/running_matlab_parallel_server.md
similarity index 100%
rename from content/guides/running_applications/running_matlab_parallel_server.md
rename to content/applications/app_specific/running_matlab_parallel_server.md
diff --git a/content/guides/running_applications/running_ocean_land_atmosphere_model_olam.md b/content/applications/app_specific/running_ocean_land_atmosphere_model_olam.md
similarity index 100%
rename from content/guides/running_applications/running_ocean_land_atmosphere_model_olam.md
rename to content/applications/app_specific/running_ocean_land_atmosphere_model_olam.md
diff --git a/content/guides/running_applications/running_theano.md b/content/applications/app_specific/running_theano.md
similarity index 100%
rename from content/guides/running_applications/running_theano.md
rename to content/applications/app_specific/running_theano.md
diff --git a/content/guides/running_applications/module_commands.md b/content/applications/modules/_index.md
similarity index 95%
rename from content/guides/running_applications/module_commands.md
rename to content/applications/modules/_index.md
index 662fa29c3b67de8725ff8c57e5d2d51e6f17b586..52826a3b642767864cbc42525a96a7c95978f3f1 100644
--- a/content/guides/running_applications/module_commands.md
+++ b/content/applications/modules/_index.md
@@ -1,8 +1,16 @@
 +++
-title = "Module Commands"
+title = "Using Preinstalled Software"
 description = "How to use the module utility on HCC resources."
+weight=10
 +++
 
+HCC provides many popular software packages pre-installed. Unlike a traditional
+laptop or desktop, HCC resources use a module system to manage installed software.
+Users can load and use pre-installed software via the `module` command.
+
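+As a minimal sketch of a typical session (the module name and version below
+are hypothetical; run `module avail` to see what is actually installed):
+
+{{< highlight bash >}}
+module avail              # list all software modules available on the cluster
+module load python/3.6    # load a (hypothetical) module into your session
+module list               # show currently loaded modules
+module purge              # unload all loaded modules
+{{< /highlight >}}
+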
+To request additional software installs, please complete a
+[software installation request](https://hcc.unl.edu/software-installation-request).
+
 `module` commands provide an HPC system user the capability to compile
 into their source code using any type of library that is
 available on the server. The `module` command gives each user the
diff --git a/content/guides/running_applications/available_software_for_crane.md b/content/applications/modules/available_software_for_crane.md
similarity index 100%
rename from content/guides/running_applications/available_software_for_crane.md
rename to content/applications/modules/available_software_for_crane.md
diff --git a/content/guides/running_applications/available_software_for_rhino.md b/content/applications/modules/available_software_for_rhino.md
similarity index 100%
rename from content/guides/running_applications/available_software_for_rhino.md
rename to content/applications/modules/available_software_for_rhino.md
diff --git a/content/guides/running_applications/compiling_source_code/_index.md b/content/applications/user_software/_index.md
similarity index 58%
rename from content/guides/running_applications/compiling_source_code/_index.md
rename to content/applications/user_software/_index.md
index d98f37437b3a1342d91035854454b73ea183dabb..f8b6766503ab74a513cafbd9769789c4d5b186a3 100644
--- a/content/guides/running_applications/compiling_source_code/_index.md
+++ b/content/applications/user_software/_index.md
@@ -1,11 +1,14 @@
 +++
-title = "Compiling Source Code"
+title = "Using Custom Software"
 description = "How to compile source code of various types on HCC resources."
+weight=20
 +++
 
+# Compile Code from Source
+
 Compiling source code on HCC machines is done with compiler environments
-configured by the `module` utility.  The utility adds directives to the
-environment and adds executables to the `PATH` so that users and jobs
+configured by the `module` utility.  The utility adds directives to the
+environment and adds executables to the `PATH` so that users and jobs
 can directly call the compilers.
 
 ### Finding available compiler Modules
@@ -22,5 +25,5 @@ This command will list available compiler modules to load.
 $ module load <module>
 {{< /highlight >}}
 
-This command will load an available module.  The modules are usually in
-the form of `compiler/<name>/<version>`.
+This command will load an available module.  The modules are usually in
+the form of `compiler/<name>/<version>`.
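+
+As a minimal sketch (the compiler module name and version here are hypothetical;
+check `module avail` for what is actually provided), compiling and running a
+small C program might look like:
+
+{{< highlight bash >}}
+$ module load compiler/gcc/8.1   # load a (hypothetical) GCC compiler module
+$ gcc -O2 -o hello hello.c       # the module places gcc on the PATH
+$ ./hello                        # run the resulting binary
+{{< /highlight >}}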
diff --git a/content/guides/running_applications/compiling_source_code/compiling_an_openmp_application.md b/content/applications/user_software/compiling_an_openmp_application.md
similarity index 99%
rename from content/guides/running_applications/compiling_source_code/compiling_an_openmp_application.md
rename to content/applications/user_software/compiling_an_openmp_application.md
index e409a5bfdff476f808439e1fbf7778b03f8e7bd9..c4a0d71b6689fe54ef2150afb301604c5618d267 100644
--- a/content/guides/running_applications/compiling_source_code/compiling_an_openmp_application.md
+++ b/content/applications/user_software/compiling_an_openmp_application.md
@@ -1,6 +1,7 @@
 +++
 title = "Compiling an OpenMP Application"
 description = "How to compile an OpenMP-based application on HCC resources."
+weight=20
 +++
 
 Compiling an [OpenMP](https://computing.llnl.gov/tutorials/openMP)
diff --git a/content/guides/running_applications/installing_perl_modules.md b/content/applications/user_software/installing_perl_modules.md
similarity index 99%
rename from content/guides/running_applications/installing_perl_modules.md
rename to content/applications/user_software/installing_perl_modules.md
index 4674bce2d04985feb8284b64ca43173068cad76c..518c67310d5d460dfed4ee0f9bc095dfd2d6f7ba 100644
--- a/content/guides/running_applications/installing_perl_modules.md
+++ b/content/applications/user_software/installing_perl_modules.md
@@ -1,6 +1,7 @@
 +++
 title = "Installing Perl modules"
 description = "How to install needed Perl modules under your account."
+weight=90
 +++
 
 If you need additional Perl modules, they can be installed into your
diff --git a/content/guides/running_applications/using_anaconda_package_manager.md b/content/applications/user_software/using_anaconda_package_manager.md
similarity index 60%
rename from content/guides/running_applications/using_anaconda_package_manager.md
rename to content/applications/user_software/using_anaconda_package_manager.md
index 14526a4fcdefcc30bce61d579d2dd8187e67c043..13a3db0f51428289f7ef789197b7149c0f99a43c 100644
--- a/content/guides/running_applications/using_anaconda_package_manager.md
+++ b/content/applications/user_software/using_anaconda_package_manager.md
@@ -1,44 +1,44 @@
 +++
 title = "Using Anaconda Package Manager"
 description = "How to use the Anaconda Package Manager on HCC resources."
+weight=10
 +++
 
 [Anaconda](https://www.anaconda.com/what-is-anaconda),
-from [Anaconda, Inc](https://www.anaconda.com)
+from [Anaconda, Inc](https://www.anaconda.com)
 is a completely free enterprise-ready distribution for large-scale data
 processing, predictive analytics, and scientific computing. It includes
 over 195 of the most popular Python packages for science, math,
 engineering, and data analysis. **It also offers the ability to easily
 create custom _environments_ by mixing and matching different versions
 of Python and/or R and other packages into isolated environments that
-individual users are free to create.**  Anaconda includes the `conda`
+individual users are free to create.**  Anaconda includes the `conda`
 package and environment manager to make managing these environments
 straightforward.
 
 - [Using Anaconda](#using-anaconda)
-- [Creating custom Anaconda Environment](#creating-custom-anaconda-environment)
-- [Creating custom GPU Anaconda Environment](#creating-custom-gpu-anaconda-environment)
+- [Installing Packages](#installing-packages)
 - [Adding Packages to an Existing Environment](#adding-packages-to-an-existing-environment)
 - [Using an Anaconda Environment in a Jupyter Notebook on Crane](#using-an-anaconda-environment-in-a-jupyter-notebook-on-crane)
 
 ### Using Anaconda
 
-While the standard methods of installing packages via `pip`
-and `easy_install` work with Anaconda, the preferred method is using
-the `conda` command.  
+While the standard methods of installing packages via `pip`
+and `easy_install` work with Anaconda, the preferred method is using
+the `conda` command.  
 
 {{% notice info %}}
 Full documentation on using Conda is available
-at http://conda.pydata.org/docs/
+at http://conda.pydata.org/docs/
 
 A [cheatsheet](/attachments/11635089.pdf) is also provided.
 {{% /notice %}}
 
-A few examples of the basic commands are provided here.  For a full
+A few examples of the basic commands are provided here.  For a full
 explanation of all of Anaconda/Conda's capabilities, see the
-documentation linked above. 
+documentation linked above. 
 
-Anaconda is provided through the `anaconda` module on HCC machines.  To
+Anaconda is provided through the `anaconda` module on HCC machines.  To
 begin using it, load the Anaconda module.
 
 {{% panel theme="info" header="Load the Anaconda module to start using Conda" %}}
@@ -47,7 +47,7 @@ module load anaconda
 {{< /highlight >}}
 {{% /panel %}}
 
-To display general information about Conda/Anaconda, use the `info` subcommand.
+To display general information about Conda/Anaconda, use the `info` subcommand.
 
 {{% panel theme="info" header="Display general information about Conda/Anaconda" %}}
 {{< highlight bash >}}
@@ -56,8 +56,8 @@ conda info
 {{% /panel %}}
 
 Conda allows the easy creation of isolated, custom environments with
-packages and versions of your choosing.  To show all currently available
-environments, and which is active, use the `info `subcommand with the
+packages and versions of your choosing.  To show all currently available
+environments, and which is active, use the `info` subcommand with the
 `-e` option.
 
 {{% panel theme="info" header="List available environments" %}}
@@ -68,7 +68,7 @@ conda info -e
 
 The active environment will be marked with an asterisk (\*) character.
 
-The `list` command will show all packages installed
+The `list` command will show all packages installed
 in the currently active environment.
 
 {{% panel theme="info" header="List installed packages in current environment" %}}
@@ -77,7 +77,9 @@ conda list
 {{< /highlight >}}
 {{% /panel %}}
 
-To find the names of packages, use the `search` subcommand.
+### Installing Packages
+
+To find the names of packages, use the `search` subcommand.
 
 {{% panel theme="info" header="Search for packages" %}}
 {{< highlight bash >}}
@@ -89,27 +91,21 @@ If the package is available, this will also display available package
 versions and compatible Python versions the package may be installed
 under.
 
-### Creating Custom Anaconda Environment
-
-The `create` command is used to create a new environment.  It requires
+The `create` command is used to create a new environment.  It requires
 at a minimum a name for the environment, and at least one package to
-install.  For example, suppose we wish to create a new environment, and
+install.  For example, suppose we wish to create a new environment, and
 need version 1.8 of NumPy.
 
-{{% notice info %}}
-The `conda create` command must be run on the login node.
-{{% /notice %}}
-
 {{% panel theme="info" header="Create a new environment by providing a name and package specification" %}}
 {{< highlight bash >}}
-conda create -n mynumpy numpy=1.8 
+conda create -n mynumpy numpy=1.8 
 {{< /highlight >}}
 {{% /panel %}}
 
 This will create a new environment called 'mynumpy' and installed NumPy
-version 1.8, along with any required dependencies.  
+version 1.8, along with any required dependencies.  
 
-To use the environment, we must first *activate* it.
+To use the environment, we must first *activate* it.
 
 {{% panel theme="info" header="Activate environment" %}}
 {{< highlight bash >}}
@@ -117,53 +113,15 @@ source activate mynumpy
 {{< /highlight >}}
 {{% /panel %}}
 
-Our new environment is now active, and we can use it.  The shell prompt
+Our new environment is now active, and we can use it.  The shell prompt
 will change to indicate this as well (this can be disable if desired).
 
-### Creating Custom GPU Anaconda Environment
-
-We provide GPU versions of various frameworks such as `tensorflow`, `keras`, `theano`, via [modules](../module_commands). However, sometimes you may need additional libraries or packages that are not available as part of these modules. In this case, you will need to create your own GPU Anaconda environment.
-
-To do this, you need to first clone one of our GPU modules to a new Anaconda environment, and then install the desired packages in this new environment.
-
-The reason for this is that the GPU modules we support are built using the specific CUDA drivers our GPU nodes have. If you just create custom GPU environment without cloning the module, your code will not utilize the GPUs.
-
-
-For example, if you want to use `tensorflow` with additional packages, first do:
-{{% panel theme="info" header="Cloning GPU module to a new Anaconda environment" %}}
-{{< highlight bash >}}
-module load tensorflow-gpu/py36/1.12 anaconda
-conda create -n tensorflow-gpu-1.12-custom --clone $CONDA_DEFAULT_ENV
-module purge
-{{< /highlight >}}
-{{% /panel %}}
-
-This will create a new `tensorflow-gpu-1.12-custom` environment in your home directory that is a copy of the `tensorflow-gpu` module. Then, you can install the additional packages you need in this environment.
-{{% panel theme="info" header="Install new packages in the currently active environment" %}}
-{{< highlight bash >}}
-module load anaconda
-source activate tensorflow-gpu-1.12-custom
-conda install <packages>
-{{< /highlight >}}
-{{% /panel %}}
-
-Next, whenever you want to use this custom GPU Anaconda environment, you need to add these two lines in your submit script:
-{{< highlight bash >}}
-module load anaconda
-source activate tensorflow-gpu-1.12-custom
-{{< /highlight >}}
-
-{{% notice info %}}
-If you have custom GPU Anaconda environment please only use the two lines from above and **DO NOT** load the module you have cloned earlier. Using `module load tensorflow-gpu/py36/1.12` and `source activate tensorflow-gpu-1.12-custom` in the same script is **wrong** and may give you various errors and incorrect results.
-{{% /notice %}}
-
-
 ### Adding Packages to an Existing Environment
 
-To install additional packages in an environment, use the `install`
-subcommand.  Suppose we want to install iPython in our 'mynumpy'
-environment.  While the environment is active, use `install `with no
-additional arguments.  
+To install additional packages in an environment, use the `install`
+subcommand.  Suppose we want to install iPython in our 'mynumpy'
+environment.  While the environment is active, use `install` with no
+additional arguments.  
 
 {{% panel theme="info" header="Install a new package in the currently active environment" %}}
 {{< highlight bash >}}
@@ -171,12 +129,8 @@ conda install ipython
 {{< /highlight >}}
 {{% /panel %}}
 
-{{% notice info %}}
-The `conda install` command must be run on the login node.
-{{% /notice %}}
-
 If you aren't currently in the environment you wish to install the
-package in, add the `-n `option to specify the name.
+package in, add the `-n` option to specify the name.
 
 {{% panel theme="info" header="Install new packages in a specified environment" %}}
 {{< highlight bash >}}
@@ -184,7 +138,7 @@ conda install -n mynumpy ipython
 {{< /highlight >}}
 {{% /panel %}}
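+
+If you do not remember an environment's exact name, you can list all of
+your environments first:
+
+{{% panel theme="info" header="List existing environments" %}}
+{{< highlight bash >}}
+conda env list
+{{< /highlight >}}
+{{% /panel %}}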
 
-The `remove` subcommand to uninstall a package functions similarly.
+The `remove` subcommand, which uninstalls packages, functions similarly.
 
 {{% panel theme="info" header="Remove package from currently active environment" %}}
 {{< highlight bash >}}
@@ -198,21 +152,16 @@ conda remove -n mynumpy ipython
 {{< /highlight >}}
 {{% /panel %}}
 
-To exit an environment, we *deactivate* it.
+To exit an environment, we *deactivate* it.
 
 {{% panel theme="info" header="Exit current environment" %}}
-Newer versions of anaconda:
-{{< highlight bash >}}
-conda deactivate
-{{< /highlight >}}
-Older versions of anaconda:
 {{< highlight bash >}}
 source deactivate
 {{< /highlight >}}
 {{% /panel %}}
 
-Finally, to completely remove an environment, add the `--all `option
-to `remove`.
+Finally, to completely remove an environment, add the `--all` option
+to `remove`.
 
 {{% panel theme="info" header="Completely remove an environment" %}}
 {{< highlight bash >}}
@@ -267,7 +216,7 @@ Jupyter Notebook. To do so, follow the steps below, replacing
         {{< /highlight >}}
 
 4.  Once you have the environment set up, deactivate it:
-    {{< highlight bash >}}conda deactivate{{< /highlight >}}
+    {{< highlight bash >}}source deactivate{{< /highlight >}}
 
 5.  To make your conda environments accessible from the worker nodes,
     enter the following commands:
@@ -278,13 +227,10 @@ Jupyter Notebook. To do so, follow the steps below, replacing
     ln -s $WORK/.jupyter/kernels ~/.local/share/jupyter/kernels
     {{< /highlight >}}
 
-	{{% notice note %}}
+{{% notice note %}}
 **Note**: Step 5 only needs to be done once. Any newly created
 environments will automatically be accessible from SLURM notebooks
 once this is done.
-
-**Note**: For older version of anaconda, use `source deactivate` to 
-deactivate the environment.
 {{% /notice %}}
 
 6.  Log in to JupyterHub at https://crane.unl.edu
@@ -292,3 +238,4 @@ deactivate the environment.
     correct entry in the `New` dropdown menu in the top right
     corner.  
     {{< figure src="/images/24151931.png" height="400" class="img-border">}}
+
diff --git a/content/guides/running_applications/using_singularity.md b/content/applications/user_software/using_singularity.md
similarity index 99%
rename from content/guides/running_applications/using_singularity.md
rename to content/applications/user_software/using_singularity.md
index b04b7d1f413d29738d59840133c804d055477a14..d698904a6a18b64ab669ce803b4a747e8146e9dd 100644
--- a/content/guides/running_applications/using_singularity.md
+++ b/content/applications/user_software/using_singularity.md
@@ -1,6 +1,7 @@
 +++
 title = "Using Singularity and Docker Containers"
 description = "How to use the Singularity containerization software on HCC resources."
+weight = 20
 +++
 
 ## What is Singularity
diff --git a/content/connecting/_index.md b/content/connecting/_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..f8b2f3eaf85ce0b044454e759636a53d7ff73912
--- /dev/null
+++ b/content/connecting/_index.md
@@ -0,0 +1,10 @@
++++
+title = "Connecting"
+description = "Information on connecting to HCC resources"
+weight = "30"
++++
+
+How to connect to HCC resources
+--------------------------------------
+
+{{% children description="true" %}}
diff --git a/content/quickstarts/connecting/basic_linux_commands.md b/content/connecting/basic_linux_commands.md
similarity index 100%
rename from content/quickstarts/connecting/basic_linux_commands.md
rename to content/connecting/basic_linux_commands.md
diff --git a/content/quickstarts/connecting/for_maclinux_users.md b/content/connecting/for_maclinux_users.md
similarity index 76%
rename from content/quickstarts/connecting/for_maclinux_users.md
rename to content/connecting/for_maclinux_users.md
index dd4d1949ce3b9b34ed213830376e0fbe66be2be8..2e1a2243604979607b334b5953fc861cdfa0f70d 100644
--- a/content/quickstarts/connecting/for_maclinux_users.md
+++ b/content/connecting/for_maclinux_users.md
@@ -18,20 +18,20 @@ This quick start will help you configure your personal computer to work
 with the HCC supercomputers.
 
 If you are running Windows, please use the quickstart [For Windows
-Users]({{< relref "for_windows_users" >}}).
+Users]({{< relref "/connecting/for_windows_users" >}}).
 
 Access to HCC Supercomputers
 -------------------------------
 
 For Mac/Linux users, use the system program Terminal to access the
 HCC supercomputers. In the Terminal prompt,
-type `ssh <username>@crane.unl.edu` and the corresponding password
-to get access to the HCC cluster **Crane**. Note that &lt;username&gt;
-should be replaced by your HCC account username. If you do not have a
+type `ssh <username>@crane.unl.edu`, then enter your password when
+prompted, to access the HCC cluster **Crane**. Note that &lt;username&gt;
+should be replaced by your HCC account username. If you do not have an
 HCC account, please contact an HCC specialist
 ({{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu))
 or go to https://hcc.unl.edu/newusers.
-To use the **Rhino** cluster, replace crane.unl.edu with with rhino.unl.edu.
+To use the **Rhino** cluster, replace crane.unl.edu with rhino.unl.edu.
 
 {{< highlight bash >}}
 $ ssh <username>@crane.unl.edu
@@ -44,9 +44,9 @@ File Transferring with HCC Supercomputers
 ### Using the SCP command
 
 For Mac/Linux users, file transferring between your personal computer
-and the HCC supercomputers can be achieved through the command `scp`.
+and the HCC supercomputers can be achieved through the command `scp`.
 Here we use **Crane** for example. **The following commands should be
-executed from your computer. **
+executed from your computer.**
 
 **Uploading from local to remote**
 
@@ -57,7 +57,7 @@ $ scp -r ./<folder name> <username>@crane.unl.edu:/work/<group name>/<username>
 The above command line transfers a folder from the current directory
 (`./`) of your computer to the `$WORK` directory of the HCC
 supercomputer, Crane. Note that you need to replace `<group name>`
-and `<username>` with your HCC group name and username. 
+and `<username>` with your HCC group name and username. 
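+
+As a concrete (hypothetical) example, a user `demo01` in group `demo`
+uploading a folder named `data` would run:
+
+{{< highlight bash >}}
+# hypothetical username, group, and folder for illustration
+$ scp -r ./data demo01@crane.unl.edu:/work/demo/demo01
+{{< /highlight >}}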
 
 **Downloading from remote to local**
 
@@ -65,15 +65,15 @@ and `<username>` with your HCC group name and username. 
 $ scp -r <username>@crane.unl.edu:/work/<group name>/<username>/<folder name> ./
 {{< /highlight >}}
 
-The above command line transfers a folder from the `$WORK` directory of
-the HCC supercomputer, Crane, to the current directory (`./`) of the
+The above command line transfers a folder from the `$WORK` directory of
+the HCC supercomputer, Crane, to the current directory (`./`) of
 your computer.
 
 ### Using Cyberduck
 
 If you wish to use a GUI, be aware that not all programs will function
-correctly with Duo two-factor authentication.  Mac users are recommended
+correctly with Duo two-factor authentication.  Mac users are recommended
 to use [Cyberduck](https://cyberduck.io). It is compatible with Duo, but a
 few settings need to be changed.
 
@@ -82,7 +82,7 @@ Under **Preferences - General**, change the default protocol to SFTP:
 {{< figure src="/images/7274497.png" height="450" >}}
 
 Under **Preferences - Transfers**, reuse the browser connection for file
-transfers.  This will avoid the need to reenter your password for every
+transfers.  This will avoid the need to reenter your password for every
 file transfer:
 
 {{< figure src="/images/7274498.png" height="450" >}}
@@ -96,9 +96,9 @@ To add an HCC machine, in the bookmarks pane click the "+" icon:
 
 {{< figure src="/images/7274500.png" height="450" >}}
 
-Ensure the type of connection is SFTP.  Enter the hostname of the machine 
+Ensure the type of connection is SFTP.  Enter the hostname of the machine 
 you wish to connect to (crane.unl.edu, rhino.unl.edu) in the **Server**
-field, and your HCC username in the **Username** field.  The
+field, and your HCC username in the **Username** field.  The
 **Nickname** field is arbitrary, so enter whatever you prefer.
 
 {{< figure src="/images/7274501.png" height="450" >}}
@@ -112,15 +112,15 @@ and click *Login*.
 
 {{< figure src="/images/7274508.png" height="450" >}}
 
-A second login dialogue will now appear.  Notice the text has changed to
+A second login dialogue will now appear.  Notice the text has changed to
 say Duo two-factor.
 
 {{< figure src="/images/7274510.png" height="450" >}}
 
-Clear the **Password** field in the dialogue.  If you are using the Duo
+Clear the **Password** field in the dialogue.  If you are using the Duo
 Mobile app, enter '1' to have a push notification sent to your phone or
-tablet.  If you are using a Yubikey, ensure the cursor is active in the
-**Password** field, and press the button on the Yubikey.
+tablet.  If you are using a Yubikey, ensure the cursor is active in the
+**Password** field, and press the button on the Yubikey.
 
 {{< figure src="/images/7274509.png" height="450" >}}
 
diff --git a/content/quickstarts/connecting/for_windows_users.md b/content/connecting/for_windows_users.md
similarity index 73%
rename from content/quickstarts/connecting/for_windows_users.md
rename to content/connecting/for_windows_users.md
index fb3f1a78c79b221d42d712b8d652eabd5705f642..9d915ca4e06c8993892f237acd1368afab3f5627 100644
--- a/content/quickstarts/connecting/for_windows_users.md
+++ b/content/connecting/for_windows_users.md
@@ -22,7 +22,7 @@ Access to HCC Supercomputers
 
 {{% notice info %}}
 If you are on a Mac, please use the quickstart for [For Mac/Linux
-Users]({{< relref "for_maclinux_users" >}}).
+Users]({{< relref "/connecting/for_maclinux_users" >}}).
 {{% /notice %}}
 
 
@@ -30,16 +30,16 @@ Users]({{< relref "for_maclinux_users" >}}).
 --------------
 For Windows 10 users, use the Command Prompt, accessed by entering `cmd` in the start menu, to access the
 HCC supercomputers. In the Command Prompt,
-type `ssh <username>@crane.unl.edu` and the corresponding password
-to get access to the HCC cluster **Crane**. Note that &lt;username&gt;
-should be replaced by your HCC account username. If you do not have a
+type `ssh <username>@tusker.unl.edu`, then enter your password when
+prompted, to access the HCC cluster **Tusker**. Note that &lt;username&gt;
+should be replaced by your HCC account username. If you do not have an
 HCC account, please contact an HCC specialist
 ({{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu))
 or go to http://hcc.unl.edu/newusers.
-
+To use the **Crane** cluster, replace tusker.unl.edu with crane.unl.edu.
 
 {{< highlight bash >}}
-C:\> ssh <username>@crane.unl.edu
+C:\> ssh <username>@tusker.unl.edu
 C:\> <password>
 {{< /highlight >}}
 
@@ -49,21 +49,21 @@ C:\> <password>
 --------------
 This quick start will help you configure your personal computer to work
 with the HCC supercomputers. Here we use two third-party applications,
-**PuTTY** and **WinSCP** for demonstration. 
+**PuTTY** and **WinSCP**, for demonstration.
 
 PuTTY: https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html
-or [Direct Link](https://the.earth.li/~sgtatham/putty/latest/w32/putty.exe)
+or [Direct Link](https://the.earth.li/~sgtatham/putty/latest/w32/putty.exe)
 
 
 Here we use the HCC cluster **Tusker** for demonstration. To use the
-**Crane** cluster, replace `tusker.unl.edu` with `crane.unl.edu`.
+**Crane** cluster, replace `tusker.unl.edu` with `crane.unl.edu`.
 
-1.  On the first screen, type `tusker.unl.edu` for Host Name, then click
-    **Open**. 
+1.  On the first screen, type `tusker.unl.edu` for Host Name, then click
+    **Open**. 
     {{< figure src="/images/3178523.png" height="450" >}}
-2.  On the second screen, click on **Yes**.  
+2.  On the second screen, click on **Yes**.  
     {{< figure src="/images/3178524.png" height="300" >}}
-3.  On the third screen, enter your HCC account **username**. If you do
+3.  On the third screen, enter your HCC account **username**. If you do
     not have an HCC account, please contact an HCC specialist
     ({{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu))
     or go to http://hcc.unl.edu/newusers.
@@ -73,9 +73,9 @@ Here we use the HCC cluster **Tusker** for demonstration. To use the
 
     {{< figure src="/images/8127261.png" height="450" >}}      
 
-4.  On the next screen, enter your HCC account **password**. 
+4.  On the next screen, enter your HCC account **password**. 
 
-    {{% notice info %}}**Note that PuTTY will not show the characters as you type for security reasons.**{{% /notice %}} 
+    {{% notice info %}}**Note that PuTTY will not show the characters as you type for security reasons.**{{% /notice %}} 
 
     {{< figure src="/images/8127262.png" height="450" >}}
 
@@ -85,7 +85,7 @@ Here we use the HCC cluster **Tusker** for demonstration. To use the
       
 6.  If you have a Yubikey set up by HCC, please hold the Yubikey for \~1
 second. Then you will be brought to your home directory similar to the one
-    below.   
+    below.
      
     {{< figure src="/images/8127266.png" height="450" >}} 
       
@@ -105,21 +105,21 @@ Here we use the HCC cluster **Tusker** for demonstration. To use the
     home directory similar to the one below.
 
     {{< figure src="/images/8127264.png" height="450" >}}
- 
+ 
 
 File Transferring with HCC Supercomputers
 -----------------------------------------
 
 {{% notice info%}}
-For best results when transfering data to and from the clusters, refer to [Handling Data]({{< ref "/guides/handling_data" >}})
+For best results when transferring data to and from the clusters, refer to [Handling Data]({{< ref "/handling_data" >}})
 {{%/notice%}}
 
 ### SCP
 
 For Windows users, file transferring between your personal computer
-and the HCC supercomputers can be achieved through the command `scp`.
+and the HCC supercomputers can be achieved through the command `scp`.
 Here we use **Tusker** for example. **The following commands should be
-executed from your computer. **
+executed from your computer.**
 
 **Uploading from local to remote**
 
@@ -130,7 +130,7 @@ C:\> scp -r .\<folder name> <username>@tusker.unl.edu:/work/<group name>/<userna
 The above command line transfers a folder from the current directory
 (`.\`) of your computer to the `$WORK` directory of the HCC
 supercomputer, Tusker. Note that you need to replace `<group name>`
-and `<username>` with your HCC group name and username. 
+and `<username>` with your HCC group name and username. 
 
 **Downloading from remote to local**
 
@@ -138,8 +138,8 @@ and `<username>` with your HCC group name and username. 
 C:\> scp -r <username>@tusker.unl.edu:/work/<group name>/<username>/<folder name> .\
 {{< /highlight >}}
 
-The above command line transfers a folder from the `$WORK` directory of
-the HCC supercomputer, Tusker, to the current directory (`.\`) of the
+The above command line transfers a folder from the `$WORK` directory of
+the HCC supercomputer, Tusker, to the current directory (`.\`) of
 your computer.
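+
+As a concrete (hypothetical) example, a user `demo01` in group `demo`
+downloading a folder named `data` would run:
+
+{{< highlight bash >}}
+:: hypothetical username, group, and folder for illustration
+C:\> scp -r demo01@tusker.unl.edu:/work/demo/demo01/data .\
+{{< /highlight >}}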
 
 
@@ -149,19 +149,19 @@ WinSCP: http://winscp.net/eng/download.php
 
 Usually it is convenient to upload and download files between your personal computer 
 and the HCC supercomputers through a Graphical User Interface (GUI).
-Download and install the third party application **WinSCP**
+Download and install the third-party application **WinSCP**
 to connect the file systems between your personal computer and the HCC supercomputers. 
-Below is a step-by-step installation guide. Here we use the HCC cluster **Tusker**
-for demonstration. To use the **Crane** cluster, replace `tusker.unl.edu`
-with `crane.unl.edu`.
+Below is a step-by-step installation guide. Here we use the HCC cluster **Tusker**
+for demonstration. To use the **Crane** cluster, replace `tusker.unl.edu`
+with `crane.unl.edu`.
 
-1.  On the first screen, type `tusker.unl.edu` for Host name, enter your
+1.  On the first screen, type `tusker.unl.edu` for Host name, enter your
     HCC account username and password for User name and Password. Then
-    click on **Login**.
+    click on **Login**.
 
     {{< figure src="/images/3178530.png" height="450" >}}
 
-2.  On the second screen, click on **Yes**.  
+2.  On the second screen, click on **Yes**.  
 
     {{< figure src="/images/3178531.png" >}}
 
@@ -170,20 +170,20 @@ with `crane.unl.edu`.
 
     {{< figure src="/images/8127268.png" >}}      
 
-4.  On the third screen, click on **Remote**. Under Remote, choose Go To
+4.  On the third screen, click on **Remote**. Under Remote, choose Go To
     and Open Directory/Bookmark. Alternatively, you can use the keyboard
     shortcut "Ctrl + O".
 
     {{< figure src="/images/3178532.png" height="450" >}}
 
-5.  On the final screen, type `/work/<group name>/<username>` for Open
+5.  On the final screen, type `/work/<group name>/<username>` for Open
     directory. Use your HCC group name and username to replace
-    `<group name>` and `<username>`. Then click on **OK**.  
+    `<group name>` and `<username>`. Then click on **OK**.  
 
     {{< figure src="/images/3178533.png" height="450" >}}
 
 6.  Now you can drag and drop the files between your personal computer
-    and the HCC supercomputers.   
+    and the HCC supercomputers.   
     {{< figure src="/images/3178539.png" height="450" >}}
 
 Tutorial Video
@@ -192,3 +192,4 @@ Tutorial Video
 {{< youtube -Vh7SyC-3mA >}}
 
 
+
diff --git a/content/guides/running_applications/how_to_setup_x11_forwarding.md b/content/connecting/how_to_setup_x11_forwarding.md
similarity index 100%
rename from content/guides/running_applications/how_to_setup_x11_forwarding.md
rename to content/connecting/how_to_setup_x11_forwarding.md
diff --git a/content/quickstarts/connecting/mobaxterm_windows.md b/content/connecting/mobaxterm_windows.md
similarity index 88%
rename from content/quickstarts/connecting/mobaxterm_windows.md
rename to content/connecting/mobaxterm_windows.md
index 4e64a449499b43337476297b1bbab90cb9b1e38a..c3c9cac5bce87a158d8cdedf451203a317401f33 100644
--- a/content/quickstarts/connecting/mobaxterm_windows.md
+++ b/content/connecting/mobaxterm_windows.md
@@ -16,12 +16,12 @@ Access to HCC Supercomputers using MobaXterm
 To connect to HCC resources using MobaXterm, open the application and select the Session Icon. 
 {{< figure src="/images/moba/main.png" height="450" >}}
 
-Select SSH as the session type. Enter the cluster you are connecting to, in the example, `crane.unl.edu`, is used.  Check `Specify username`  and enter your HCC username in the the box. Note that &lt;username&gt;
-should be replaced by your HCC account username. If you do not have a
+Select SSH as the session type. Enter the cluster you are connecting to; in this example, `crane.unl.edu` is used.  Check `Specify username` and enter your HCC username in the box. Note that &lt;username&gt;
+should be replaced by your HCC account username. If you do not have an
 HCC account, please contact an HCC specialist
 ({{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu))
 or go to https://hcc.unl.edu/newusers.
-To use the **Rhino** cluster, replace crane.unl.edu with with rhino.unl.edu.
+To use the **Rhino** cluster, replace crane.unl.edu with rhino.unl.edu.
 {{< figure src="/images/moba/session.png" height="450" >}}
 
 Select OK. You will be asked to enter your password and to authenticate with Duo.
@@ -41,14 +41,14 @@ MobaXterm allows file transfering in a 'drag and drop' style, similar to WinSCP.
 
 The above example transfers a folder from a local directory of your computer to the `$HOME` directory of the HCC
 supercomputer, Crane. Note that you need to replace `<group name>`
-and `<username>` with your HCC group name and username. 
+and `<username>` with your HCC group name and username. 
 {{< figure src="/images/moba/upload.png" height="450" >}}
 **Downloading from remote to local**
 
 
 
-The above example transfers a folder from the `$HOME` directory of
-the HCC supercomputer, Crane, to a local directory on 
+The above example transfers a folder from the `$HOME` directory of
+the HCC supercomputer, Crane, to a local directory on 
 your computer.
 {{< figure src="/images/moba/download.png" height="450" >}}
 **Editing remote files**
@@ -76,4 +76,5 @@ In the Generator window, click `Generate` and move your mouse around in the `Key
 After the key is generated, go to the `Conversions` tab and select `Export OpenSSH key` and save it to your local machine. 
 
 {{< figure src="/images/moba/exportssh.png" height="450" >}}
-**Treat the private key file the same as you would a password.  Keep your private key in a secure location and do not share it with anyone.**
\ No newline at end of file
+**Treat the private key file the same as you would a password.  Keep your private key in a secure location and do not share it with anyone.**
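+
+Once exported, the key can also be used from any OpenSSH client. For
+example (assuming the exported key was saved as `~/.ssh/hcc_key`; replace
+`<username>` with your HCC username):
+
+{{< highlight bash >}}
+# assumes the exported private key was saved as ~/.ssh/hcc_key
+ssh -i ~/.ssh/hcc_key <username>@crane.unl.edu
+{{< /highlight >}}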
+
diff --git a/content/quickstarts/reusing_ssh_connections_in_linux_or_mac.md b/content/connecting/reusing_ssh_connections_in_linux_or_mac.md
similarity index 100%
rename from content/quickstarts/reusing_ssh_connections_in_linux_or_mac.md
rename to content/connecting/reusing_ssh_connections_in_linux_or_mac.md
diff --git a/content/events/2013/HCC_Supercomputing_Symposium_2013.md b/content/events/2013/HCC_Supercomputing_Symposium_2013.md
deleted file mode 100644
index 6e1dbc7e1fe69a97f08832003df31c645d1390e7..0000000000000000000000000000000000000000
--- a/content/events/2013/HCC_Supercomputing_Symposium_2013.md
+++ /dev/null
@@ -1,31 +0,0 @@
-+++
-title = "HCC Supercomputing Symposium 2013"
-description = "HCC Supercomputing Symposium 2013"
-+++
-
-<strong>Location: Wick Alumni Center, Dana Board Room</strong><br>
-The morning sessions will be interactive – attendees should bring a laptop if at all possible!  The first sessions will be primarily planned tutorial format, while the “Open Lab” will involve more individualized help of those in attendance as desired.
-
-| Time    | Title                                                 | Description                                                      |
-|---------|-------------------------------------------------------|------------------------------------------------------------------|
-| 9 - 10am| Getting started at HCC        | HCC 101 (Linux Primer, Job submission) [Quick Start Guides](https://hcc.unl.edu/docs/quickstarts/)                 |
-| 10 - 11am | Job management						  | Creating and deploying parallel jobs at HCC, OSG [Submitting Jobs](https://hcc.unl.edu/docs/guides/submitting_jobs/)  |
-| 11 - 11:45am    | HCC Open Lab                                    |      Presentations and help from HCC staff                                          |
-| 12 - 1pm     | LUNCH lecture with UNIT         | Carl Lundstedt: How Big Data Allows Us to Investigate the Smallest Things                 |
-|1:15 – 2pm   | New User Spotlight – Steve Kolbe, Theatre                           | Media Arts Film Rendering @ HCC                   |
-| 2 - 3pm     | State of HCC; Crane Announcement                 | David Swanson                           |
-
-1.  <span style="line-height: 1.4285715;">Getting Started at HCC (9-10am</span>  
-    1.  [Getting an account at HCC](https://hcc.unl.edu/new-user-request)
-    2.  Logging into HCC resources for [windows](https://hcc.unl.edu/docs/quickstarts/connecting/for_windows_users/) or [Mac/Linux Users](https://hcc.unl.edu/docs/quickstarts/connecting/for_maclinux_users/)
-	3.	Basic Linux commands
-	4.	[Transferring files to HCC](https://hcc.unl.edu/docs/guides/handling_data/)
-2.  Job Management (10-11am)
-    1.  [End to End MPI](https://hcc.unl.edu/docs/guides/running_applications/mpi_jobs_on_hcc/) example.  Including how to transfer source code and compile an MPI application on Tusker.
-    2.  [Compiling](https://hcc.unl.edu/docs/guides/running_applications/compiling_source_code/compiling_an_openmp_application/) and [submitting](https://hcc.unl.edu/docs/guides/submitting_jobs/submitting_an_openmp_job/) multi-thread and OpenMP applications.
-    3.  Using scheduler features such as [Arrays](https://hcc.unl.edu/docs/guides/submitting_jobs/submitting_a_job_array/
-) and [Dependencies](https://hcc.unl.edu/docs/guides/submitting_jobs/job_dependencies/
-).
-    4.  Debugging on Tusker by using [interactive submission](https://hcc.unl.edu/docs/guides/submitting_jobs/submitting_an_interactive_job/). 
-        <span style="line-height: 1.4285715;">&lt;lunch&gt;</span>
-
diff --git a/content/events/2013/hcc_condor_workshop_2013.md b/content/events/2013/hcc_condor_workshop_2013.md
deleted file mode 100644
index 5b70a75a2d9d6690a712743dc732e959e833c0b0..0000000000000000000000000000000000000000
--- a/content/events/2013/hcc_condor_workshop_2013.md
+++ /dev/null
@@ -1,50 +0,0 @@
-+++
-title = "HCC Condor Workshop 2013"
-description = "HCC Condor Workshop 2013."
-+++
-
-Condor
-============
-
-Workshop presentation slides
-&lt;[condor\_workshop.pdf](https://unl.box.com/s/ou8tf62bqkbrh7yx0cl4me1zbp3z0j08)&gt;
-
-| Time    | Title                   | Description                                    |
-|---------|-------------------------|------------------------------------------------|
-| 1pm-2pm | Demo/Hand-On Practice   | Executing Condor jobs on HCC supertcomputers   |
-| 2pm-3pm | Individual Consultation | Bring your research code on HCC supercomputers |
-
-Password for all demo accounts is **HCC\_condor2013**.
-Replace `<group name>` with `demo` and `<username>` with `demoXX` (e.g. `demo01`).
-
--   Get started with [For Windows Users](https://hcc.unl.edu/docs/quickstarts/connecting/for_windows_users/)
-    or [For Mac/Linux Users](https://hcc.unl.edu/docs/quickstarts/connecting/for_maclinux_users/).
--   Sample code for Fortran/C on HCC:
-    &lt;[serial\_dir.zip](https://unl.box.com/s/khkpt68pe3k0lu2ythn9kzjeva40itdd)&gt;
--   Sample code for Condor on HCC:
-    &lt;[condor\_dir.zip](https://unl.box.com/s/qpvgnqr9ukjcmt0d2trde4qcp5e5ez17)&gt;
--   For more details of this demonstration, see [Condor Jobs on
-    HCC](https://hcc.unl.edu/docs/guides/submitting_jobs/condor_jobs_on_hcc/).
-
-MATLAB
-============
-Workshop presentation slides
-&lt;[matlab\_workshop.pdf](https://unl.box.com/s/ou8tf62bqkbrh7yx0cl4me1zbp3z0j08)&gt;
-
-| Time    | Title                   | Description                                       |
-|---------|-------------------------|---------------------------------------------------|
-| 1pm-2pm | Demo/Hand-On Practice   | Implementing a MATLAB code on HCC supertcomputers |
-| 2pm-3pm | Individual Consultation | Bring your MATLAB code on HCC supercomputers      |
-
-Password for all demo accounts is **HCC\_matlab2013**.
-Replace `<group name>` with `demo` and `<username>` with `demoXX` (e.g. `demo01`).
-
-
--   Get started with [For Windows Users](https://hcc.unl.edu/docs/quickstarts/connecting/for_windows_users/)
-    or [For Mac/Linux Users](https://hcc.unl.edu/docs/quickstarts/connecting/for_maclinux_users/).
--   Hands On: [MATLAB on HCC](https://hcc.unl.edu/docs/guides/submitting_jobs/submitting_matlab_jobs/)
--   Sample code for MATLAB on HCC:
-    &lt;[matlab\_dir.zip](https://unl.box.com/s/u19fy7cjeswfl1wi7h1nkeeie8gl3z90)&gt;
--   Sample code for Parallel MATLAB Job:
-    &lt;[parallel\_matlab\_dir.zip](https://unl.box.com/s/fhl3kf6hg8dmtozkphq3u2r58yexrppe)&gt;
-
diff --git a/content/events/2013/hcc_matlab_workshop_2013.md b/content/events/2013/hcc_matlab_workshop_2013.md
deleted file mode 100644
index 6ef348f6d65835a80c75586b677a9759e6294241..0000000000000000000000000000000000000000
--- a/content/events/2013/hcc_matlab_workshop_2013.md
+++ /dev/null
@@ -1,29 +0,0 @@
-+++
-title = "HCC Matlab Workshop 2013"
-description = "HCC Matlab Workshop 2013."
-+++
-
-Workshop presentation slides
-&lt;[matlab\_workshop.pdf](https://unl.box.com/s/lulbnbrnr7xqwufrx5s20gxnyxpykn7j)&gt;
-
-| Time        | Title                   | Description                                      |
-|-------------|-------------------------|--------------------------------------------------|
-| UNMC        |                         |                                                  |
-| 10am - 11am | Demo/Hand-On Practice   | Implementing a MATLAB code on HCC supercomputers |
-| 11am - 12pm | Individual Consultation | Bring your MATLAB code on HCC supercomputers     |
-| UNO         |                         |                                                  |
-| 2pm - 3pm   | Demo/Hand-On Practice   | Implementing a MATLAB code on HCC supercomputers |
-| 3pm - 4pm   | Individual Consultation | Bring your MATLAB code on HCC supercomputers     |
-
-
-Password for all demo accounts is **HCC\_matlab2013**.
-Replace `<group name>` with `demo` and `<username>` with `demoXX`
-(e.g. `demo01`).
-
--   Get started with [For Windows Users](https://hcc.unl.edu/docs/quickstarts/connecting/for_windows_users/)
-    or [For Mac/Linux Users](https://hcc.unl.edu/docs/quickstarts/connecting/for_maclinux_users/).
--   Hands On: [MATLAB on HCC](https://hcc.unl.edu/docs/guides/submitting_jobs/submitting_matlab_jobs/)
--   Sample code for MATLAB on HCC:
-    &lt;[matlab\_dir.zip](https://unl.box.com/s/u19fy7cjeswfl1wi7h1nkeeie8gl3z90)&gt;
--   Sample code for Parallel MATLAB Job:
-    &lt;[parallel\_matlab\_dir.zip](https://unl.box.com/s/fhl3kf6hg8dmtozkphq3u2r58yexrppe)&gt;
diff --git a/content/events/2013/hcc_mpi_workshop_2013.md b/content/events/2013/hcc_mpi_workshop_2013.md
deleted file mode 100644
index 0f8b72c7cdcb266d98dd45401316cfb937ec4e26..0000000000000000000000000000000000000000
--- a/content/events/2013/hcc_mpi_workshop_2013.md
+++ /dev/null
@@ -1,25 +0,0 @@
-+++
-title = "HCC MPI Workshop 2013"
-description = "HCC MPI Workshop 2013."
-+++
-
-Workshop presentation slides &lt;[mpi\_workshop.pdf](https://unl.box.com/s/7rufq2a39n2vektg19ko9fjcbios4kqp)&gt;
-
-| Time    | Title                   | Description                                    |
-|---------|-------------------------|------------------------------------------------|
-| 1pm-2pm | Demo/Hand-On Practice   | Implementing a MPI code on HCC supertcomputers |
-| 2pm-3pm | Individual Consultation | Bring your research code on HCC supercomputers |
-
-Password for all demo accounts is **HCC\_mpi2013**.
-Replace `<group name>` with `demo` and `<username>` with `demoXX` (e.g. `demo01`).
-
--   Get started with [For Windows Users](https://hcc.unl.edu/docs/quickstarts/connecting/for_windows_users/)
-    or [For Mac/Linux Users](https://hcc.unl.edu/docs/quickstarts/connecting/for_maclinux_users/).
--   Sample code for Fortran/C on HCC:
-    &lt;[serial\_dir.zip](https://unl.box.com/s/khkpt68pe3k0lu2ythn9kzjeva40itdd)&gt;
--   Sample code for MPI on HCC:
-    &lt;[mpi\_dir.zip](https://unl.box.com/s/fekjdxx82gxp3x75nbsrqmrri52d4zlp)&gt;
--   For more details of this demonstration, see [MPI Jobs on
-    HCC](https://hcc.unl.edu/docs/guides/submitting_jobs/submitting_an_mpi_job/).  
-
-
diff --git a/content/events/2016/hcc_qiime_workshop_2016.md b/content/events/2016/hcc_qiime_workshop_2016.md
deleted file mode 100644
index 5b97b54595210a70b04eee24f9a081c2972eeea6..0000000000000000000000000000000000000000
--- a/content/events/2016/hcc_qiime_workshop_2016.md
+++ /dev/null
@@ -1,71 +0,0 @@
-+++
-title = "HCC QIIME Workshop 2016"
-description = "HCC QIIME Workshop 2016."
-+++
-
-When:   October 20, 2016
-
-Where:  NIC Food Innovation Campus room 277
-
-             1901 N. 21st St., Lincoln, 68588
-
-[Details and Agenda](https://hcc.unl.edu/hcc-qiime-workshop)
-
-------------------------------------------------------------------------
-
-Materials
----------
-
-Slides for morning
-introduction: [QIIME\_2016\_intro.pdf](https://unl.box.com/s/d2o92uoar7x59a7rxl0f3y7o3ncy0swq)
-
-Software Carpentry Unix Shell
-Lessons: [http://eharstad.github.io/shell-novice/](http://eharstad.github.io/shell-novice/)
-
-Slides for HCC
-Overview: [2016\_Qiime.pdf](https://unl.box.com/s/4gryu7ny8lo95fs8clyu566k2ubbhtef)
-
-QIIME
-slides: [QIIME\_Talk\_Oct20.pdf](https://unl.box.com/s/h4zi0h27t1t16u9due3ybfrxmsdebzin)
-
-QIIME
-tutorial: [http://bioinformatics.unl.edu/qiime_tutorial.pdf](http://bioinformatics.unl.edu/qiime_tutorial.pdf)
-
-------------------------------------------------------------------------
-
-**What you need to do before Thursday October 20th**
-----------------------------------------------------
-
-*Participants will need to bring a laptop with them to the workshop.* In addition, there are a few things that are required to be completed before you arrive:
---------------------------------------------------------------------------------------------------------------------------------------------------------------
-
-1.  To participate in this workshop, you will need to
-    [sign up for an HCC account](http://hcc.unl.edu/new-user-request)
-    (if you do not already have one). Please complete the account
-    application and [DUO two-factor authentication
-    setup](https://hcc-docs.unl.edu/display/HCCDOC/Setting+up+and+using+Duo)
-    before Thursday. This process requires advisor approval (via email)
-    and a visit to the HCC offices in the Schorr Center to activate your
-    account, so it can sometimes take a day or more to complete. Please
-    plan accordingly. If you still do not have your account set up by
-    Thursday, please arrive at the workshop by 8:30AM to get help.
-2.  Once you have your HCC account and DUO set up, please make sure that
-    you are able to log into the HCC clusters (instructions for [Windows
-    Users](https://hcc.unl.edu/docs/quickstarts/connecting/for_windows_users/)
-    and [Mac/Linux
-    Users](https://hcc.unl.edu/docs/quickstarts/connecting/for_maclinux_users/).
-    If you have any problems logging in, please email us at
-    [hcc-support@unl.edu](mailto:hcc-support@unl.edu)
-3.  On Thursday we will be demonstrating how to transfer files using
-    Globus Connect. Before arriving at the workshop, please log in to
-    the
-    [Globus Web App](https://www.globus.org/app/transfer)
-    using your My.UNL credentials (choose *University of
-    Nebraska-Lincoln* from the drop down menu).  Next, install the
-    Globus Connect Personal app on your laptop (directions for
-    [Mac OS X](https://docs.globus.org/how-to/globus-connect-personal-mac/),
-    [Linux](https://docs.globus.org/how-to/globus-connect-personal-linux/),
-    and
-    [Windows](https://docs.globus.org/how-to/globus-connect-personal-windows/)
-    so that you can begin using Globus to transfer data to and from your
-    laptop.
diff --git a/content/events/2017/unk_linear_algebra_feb_28th_2017.md b/content/events/2017/unk_linear_algebra_feb_28th_2017.md
deleted file mode 100644
index a81c2fd9ff6cdd0386fce2009cdb6c9672b99caf..0000000000000000000000000000000000000000
--- a/content/events/2017/unk_linear_algebra_feb_28th_2017.md
+++ /dev/null
@@ -1,75 +0,0 @@
-+++
-title = "UNK Linear Algebra, Feb. 28, 2017"
-description = "UNK Linear Algebra, Feb. 28, 2017."
-+++
-
-**If at any time today you have difficulties or become lost, please
-place the <span style="color: rgb(255,0,0);">red</span> sticky note on
-top of your monitor and a helper will be around to assist you.**
-
-    For these instructions, any commands to be typed into the terminal will be formatted like this.
-
-**What is a cluster:**
-----------------------
-
-![cluster image](/images/cluster_small.png)
-
-(picture courtesy of:
-[http://training.h3abionet.org/technical_workshop_2015/?page_id=403](http://training.h3abionet.org/technical_workshop_2015/?page_id=403))
-
-**  
-**
-
-**To connect to the Crane cluster:**
-------------------------------------
-
--   insert the Yubikey into the computer's USB drive. There should be a
-    small green light in the middle of the Yubikey to indicate it is
-    inserted correctly.
--   Open your preferred web browser and navigate to
-    [http://go.unl.edu/cranessh](http://go.unl.edu/cranessh)
--   Click "Start SSH session to crane.unl.edu"  
-{{% notice info %}}
-The link above is no longer available. If you wish to use a terminal in your browser, Sandstone is an option [https://hcc.unl.edu/docs/guides/sandstone/](https://hcc.unl.edu/docs/guides/sandstone/)
-{{% /notice %}}
-   ![](/images/ssh.png)
--   Click the "Terminal: SSH" icon to begin the SSH session  
-    ![](/images/terminalSSH.png)
--   Type in the provided Username and Password. Note that the password
-    will not display on screen, but rest assured that even though
-    nothing is being output, your password is being entered as you type.
--   At the "Passcode:" prompt, press your finger to the gold circle in
-    the middle of the Yubikey until a string of characters appears on
-    screen.  
-    ![](/images/yubikey.png)
--   If you logged in successfully, your screen should look similar to
-    the one belo  
-    ![](/images/crane_login.png)
-	
-**Linux Commands Reference List:**
-----------------------------------
-https://hcc.unl.edu/docs/quickstarts/connecting/basic_linux_commands/
-
-**To run MATLAB interactively:**
---------------------------------
-
--   After logging into the cluster, navigate to your $WORK directory:
-    -   cd $WORK
-
--   Request an interactive job:
-    -   srun --reservation=unk --mem=4096 --pty $SHELL
-
--   Load the MATLAB module:
-    -   module load matlab
-
--   Run MATLAB:
-    -   matlab
-
-**To access the MATLAB Tutorial:**
-----------------------------------
-
--   Navigate to your $WORK directory: 
-    -   cd $WORK
-
--   Clone the github repo containing the tutorial files:
-    -   git clone https://github.com/unlhcc/HCCWorkshops.git
diff --git a/content/events/2017/unl_r_for_biologists_class_march_8_2017.md b/content/events/2017/unl_r_for_biologists_class_march_8_2017.md
deleted file mode 100644
index 8a25c915cad2b0ca3b2a51df00ae1ddee070dd02..0000000000000000000000000000000000000000
--- a/content/events/2017/unl_r_for_biologists_class_march_8_2017.md
+++ /dev/null
@@ -1,114 +0,0 @@
-+++
-title = "R for Biologists, March 8, 2017"
-description = "R for Biologists, March 8, 2017."
-+++
-
-**We will be utilizing <span style="color: rgb(255,0,0);">red</span> and
-<span style="color: rgb(51,153,102);">green</span> sticky notes today.
-If you run into problems or have questions,**
-
-**please place the <span style="color: rgb(255,0,0);">red</span> sticky
-note to the back of your computer screen and a helper will assist you.**
-
- 
-
-If you have not already requested an HCC account under the rcourse998
-group, please do so
-[here](https://hcc.unl.edu/new-user-request)
-
-If you already have an HCC account and need to be added to the
-rcourse998 group, please let us know.
-
-If you have not previously set up Duo Authentication, please ask for
-assistance.
-
- 
-
-**Set up Instructions:**
-
-**Windows:**
-
-For Windows will we use two third party
-application **PuTTY** and **WinSCP** for demonstration.
-
-PuTTY:  
-&lt;[http://www.putty.org/](http://www.putty.org/)&gt;
-
-WinSCP:
-&lt; [http://winscp.net/eng/download.php](http://winscp.net/eng/download.php)&gt;
-
-**Mac/Linux:**
-
-Mac and Linux users will need to download and install **Cyberduck**.
-Detailed information for downloading and setting up Cyberduck can be
-found here: [For Mac/Linux Users](https://cyberduck.io/)
-
- 
-
-**Linux Commands Reference List:**
-
-[https://hcc.unl.edu/docs/quickstarts/connecting/basic_linux_commands/](https://hcc.unl.edu/docs/quickstarts/connecting/basic_linux_commands/)  
-
- 
-
-  **R core and R Studio:**
-
-We will be writing scripts offline in RStudio and then uploading them to
-execute them on the cluster. This lesson assumes you have the R core and
-RStudio installed. If you do not you can install them here:
-
-R
-core: [https://cloud.r-project.org/](https://cloud.r-project.org/)
-
-RStudio: [https://www.rstudio.com/products/rstudio/download/](https://www.rstudio.com/products/rstudio/download/)
-
- 
-
-**Required Packages:**
-
-We will also be using the dplyr, ggplot2 and maps package. If you do not
-have these installed, please install them now. You can do so using the
-following commands inside the RStudio console:
-
-    install.packages("dplyr")
-
-    install.packages("ggplot2")
-
-    install.packages("maps")
-
- 
-
-**What is a cluster:**
-
-![](/images/cluster_small.png)
-
-(picture courtesy
-of: [http://training.h3abionet.org/technical_workshop_2015/?page_id=403](http://training.h3abionet.org/technical_workshop_2015/?page_id=403))
-
-### To download the tutorial files:
-
--   Navigate to your $WORK directory: 
-    -   cd $WORK
-
--   Clone the github repo containing the tutorial files:
-    -   git clone https://github.com/unlhcc/HCCWorkshops.git
-
- 
-
-Take Home Exercise:
-
-[Data Analysis in R](https://unl.box.com/s/8i647f8are21tc11la0jqk2xddlg19wy) - Please note
-that the on the bottom of page three, there is a missing parenthesis at
-the end of the last command.
-
-The final code chunk should read:
-
-    # Calculate flight age using birthmonth
-
-    age <- data.frame(names(acStart), acStart, stringsAsFactors=FALSE)
-
-    colnames(age) <- c("TailNum", "acStart")
-
-    flights <- left_join(flights, age, by="TailNum")
-
-    flights <- mutate(flights, Age = (flights$Year * 12) + flights$Month - flights$acStart)
\ No newline at end of file
diff --git a/content/faq/_index.md b/content/faq/_index.md
deleted file mode 100644
index 1090e762238e137bcedf7e5e923bf2848b494010..0000000000000000000000000000000000000000
--- a/content/faq/_index.md
+++ /dev/null
@@ -1,177 +0,0 @@
-+++
-title = "FAQ"
-description = "HCC Frequently Asked Questions"
-weight = "20"
-+++
-
-- [I have an account, now what?](#i-have-an-account-now-what)
-- [How do I change my password?](#how-do-i-change-my-password)
-- [I forgot my password, how can I retrieve it?](#i-forgot-my-password-how-can-i-retrieve-it)
-- [I just deleted some files and didn't mean to! Can I get them back?](#i-just-deleted-some-files-and-didn-t-mean-to-can-i-get-them-back)
-- [How do I (re)activate Duo?](#how-do-i-re-activate-duo)
-- [How many nodes/memory/time should I request?](#how-many-nodes-memory-time-should-i-request)
-- [I am trying to run a job but nothing happens?](#i-am-trying-to-run-a-job-but-nothing-happens)
-- [I keep getting the error "slurmstepd: error: Exceeded step memory limit at some point." What does this mean and how do I fix it?](#i-keep-getting-the-error-slurmstepd-error-exceeded-step-memory-limit-at-some-point-what-does-this-mean-and-how-do-i-fix-it)
-- [I want to talk to a human about my problem. Can I do that?](#i-want-to-talk-to-a-human-about-my-problem-can-i-do-that)
-
----
-
-#### I have an account, now what?
-
-Congrats on getting an HCC account! Now you need to connect to a Holland
-cluster. To do this, we use an SSH connection. SSH stands for Secure
-Shell, and it allows you to securely connect to a remote computer and
-operate it just like you would a personal machine.
-
-Depending on your operating system, you may need to install software to
-make this connection. Check out on Quick Start Guides for information on
-how to install the necessary software for your operating system
-
-- [For Mac/Linux Users]({{< relref "for_maclinux_users" >}})
-- [For Windows Users]({{< relref "for_windows_users" >}})
-
-#### How do I change my password?
-
-#### I forgot my password, how can I retrieve it?
-
-Information on how to change or retrieve your password can be found on
-the documentation page: [How to change your
-password]({{< relref "how_to_change_your_password" >}})
-
-
-All passwords must be at least 8 characters in length and must contain
-at least one capital letter and one numeric digit. Passwords also cannot
-contain any dictionary words. If you need help picking a good password,
-consider using a (secure!) password generator such as
-[this one provided by Random.org](https://www.random.org/passwords)
-
-To preserve the security of your account, we recommend changing the
-default password you were given as soon as possible.
-
-#### I just deleted some files and didn't mean to! Can I get them back?
-
-That depends. Where were the files you deleted?
-
-**If the files were in your $HOME directory (/home/group/user/):** It's
-possible.
-
-$HOME directories are backed up daily and we can restore your files as
-they were at the time of our last backup. Please note that any changes
-made to the files between when the backup was made and when you deleted
-them will not be preserved. To have these files restored, please contact
-HCC Support at
-{{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
-as soon as possible.
-
-**If the files were in your $WORK directory (/work/group/user/):** No.
-
-Unfortunately, the $WORK directories are created as a short term place
-to hold job files. This storage was designed to be quickly and easily
-accessed by our worker nodes and as such is not conducive to backups.
-Any irreplaceable files should be backed up in a secondary location,
-such as Attic, the cloud, or on your personal machine. For more
-information on how to prevent file loss, check out [Preventing File
-Loss]({{< relref "preventing_file_loss" >}}).
-
-#### How do I (re)activate Duo?
-
-**If you have not activated Duo before:**
-
-Please stop by
-[our offices](http://hcc.unl.edu/location)
-along with a photo ID and we will be happy to activate it for you. If
-you are not local to Omaha or Lincoln, contact us at
-{{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
-and we will help you activate Duo remotely.
-
-**If you have activated Duo previously but now have a different phone
-number:**
-
-Stop by our offices along with a photo ID and we can help you reactivate
-Duo and update your account with your new phone number.
-
-**If you have activated Duo previously and have the same phone number:**
-
-Email us at
-{{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
-from the email address your account is registered under and we will send
-you a new link that you can use to activate Duo.
-
-#### How many nodes/memory/time should I request?
-
-**Short answer:** We don’t know.
-
-**Long answer:** The amount of resources required is highly dependent on
-the application you are using, the input file sizes and the parameters
-you select. Sometimes it can help to speak with someone else who has
-used the software before to see if they can give you an idea of what has
-worked for them.
-
-But ultimately, it comes down to trial and error; try different
-combinations and see what works and what doesn’t. Good practice is to
-check the output and utilization of each job you run. This will help you
-determine what parameters you will need in the future.
-
-For more information on how to determine how many resources a completed
-job used, check out the documentation on [Monitoring Jobs]({{< relref "monitoring_jobs" >}}).
-
-#### I am trying to run a job but nothing happens?
-
-Where are you trying to run the job from? You can check this by typing
-the command \`pwd\` into the terminal.
-
-**If you are running from inside your $HOME directory
-(/home/group/user/)**:
-
-Move your files to your $WORK directory (/work/group/user) and resubmit
-your job.
-
-The worker nodes on our clusters have read-only access to the files in
-$HOME directories. This means that when a job is submitted from $HOME,
-the scheduler cannot write the output and error files in the directory
-and the job is killed. It appears the job does nothing because no output
-is produced.
-
-**If you are running from inside your $WORK directory:**
-
-Contact us at
-{{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
-with your login, the name of the cluster you are running on, and the
-full path to your submit script and we will be happy to help solve the
-issue.
-
-##### I keep getting the error "slurmstepd: error: Exceeded step memory limit at some point." What does this mean and how do I fix it?
-
-This error occurs when the job you are running uses more memory than was
-requested in your submit script.
-
-If you specified `--mem` or `--mem-per-cpu` in your submit script, try
-increasing this value and resubmitting your job.
-
-If you did not specify `--mem` or `--mem-per-cpu` in your submit script,
-chances are the default amount allotted is not sufficient. Add the line
-
-{{< highlight batch >}}
-#SBATCH --mem=<memory_amount>
-{{< /highlight >}}
-
-to your script with a reasonable amount of memory and try running it again. If you keep
-getting this error, continue to increase the requested memory amount and
-resubmit the job until it finishes successfully.
-
-For additional details on how to monitor usage on jobs, check out the
-documentation on [Monitoring Jobs]({{< relref "monitoring_jobs" >}}).
-
-If you continue to run into issues, please contact us at
-{{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)
-for additional assistance.
-
-#### I want to talk to a human about my problem. Can I do that?
-
-Of course! We have an open door policy and invite you to stop by
-[either of our offices](http://hcc.unl.edu/location)
-anytime Monday through Friday between 9 am and 5 pm. One of the HCC
-staff would be happy to help you with whatever problem or question you
-have.  Alternatively, you can drop one of us a line and we'll arrange a
-time to meet:  [Contact Us](https://hcc.unl.edu/contact-us).
-
diff --git a/content/guides/_index.md b/content/guides/_index.md
deleted file mode 100644
index e581928afd9378bc73cd1b66e9aa556be3491c0d..0000000000000000000000000000000000000000
--- a/content/guides/_index.md
+++ /dev/null
@@ -1,9 +0,0 @@
-+++
-title = "Guides"
-weight = "20"
-+++
-
-In-depth guides to using HCC resources
---------------------------------------
-
-{{% children description="true" %}}
diff --git a/content/guides/anvil/_index.md b/content/guides/anvil/_index.md
deleted file mode 100644
index d1fa90e1d2a6b18e408c9d3599e88846c5ec227c..0000000000000000000000000000000000000000
--- a/content/guides/anvil/_index.md
+++ /dev/null
@@ -1,196 +0,0 @@
-+++
-title = "Anvil: HCC's Cloud"
-description = "How to use Anvil, HCC's OpenStack-based cloud resource"
-weight = "40"
-+++
-
-- [Overview](#overview)
-- [Cloud Terms](#cloud-terms)
-- [Steps for Access](#steps-for-access)
-- [Backups](#backups)
-
-
-{{% notice tip %}}
-Have your account and ready to go? Visit the Anvil OpenStack web
-interface at https://anvil.unl.edu.
-{{% /notice %}}
-
----
-
-### Overview
-
-Anvil is the Holland Computing Center's cloud computing resource, based
-on the [OpenStack](https://www.openstack.org) software.  
-OpenStack is a free and open-source software platform for
-cloud computing.  Anvil was created to address the needs of NU's
-research community that are not well served by a traditional
-batch-scheduled Linux cluster environment.  Examples of use cases that
-are well suited to Anvil include:
-
-- A highly interactive environment, especially GUI applications
-- Require root-level access, such as kernel modification or
-  virtualization work
-- Alternate operating systems, such as Windows or other distributions
-  of Linux
-- Test cluster environments for various software frameworks, such as
-  [Hadoop](http://hadoop.apache.org)
-  or [Spark](https://spark.apache.org)
-- Cluster applications that require a persistent resource, such as a
-    web or database server
-
-Using Anvil, one or more virtual machines (VMs) can be easily be created
-via a user-friendly web dashboard.  Created VMs are then accessible from
-HCC clusters, or your own workstation once connected to the Anvil
-Virtual Private Network (VPN).  Access is through standard means,
-typically via SSH for Linux VMs and Remote Desktop for Windows VMs.
-
-### Cloud Terms
-
-There are a few terms used within the OpenStack interface and in the
-instructions below that may be unfamiliar.  The following brief
-definitions may be useful.  More detailed information is available in
-the [OpenStack User Guide](http://docs.openstack.org/user-guide).
-
-- **Project**:  A project is the base unit of ownership in
-  OpenStack.  Resources (CPUs, RAM, storage, etc.) are allocated and
-  user accounts are associated with a project.  Within Anvil, each HCC
-  research group corresponds directly to a project.  Similar to
-  resource allocation on HCC clusters, the members of a group share
-  the [project's resources]({{< relref "what_are_the_per_group_resource_limits" >}}).
-     
-- **Image**:  An image corresponds to everything needed to create a
-  virtual machine for a specific operating system (OS), such as Linux
-  or Windows.  HCC creates and maintains [basic Windows and Linux]({{< relref "available_images" >}})
-  images for convenience.
-  Users can also create their own images that can then be uploaded to
-  OpenStack and used within the project.
-     
-- **Flavor**:  A flavor (also known as *instance type*), defines the
-  parameters (i.e. resources) of the virtual machine.  This includes
-  things such as number of CPUs, amount of RAM, storage, etc.  There
-  are many instance types [available within Anvil]({{< relref "anvil_instance_types" >}}),
-  designed to meet a variety of needs.
-     
-- **Instance**:  An instance is a running virtual machine, created
-  by combining an image (the basic OS) with a flavor (resources).
-   That is, *Image + Flavor = Instance*.
-     
-- **Volume**:  A volume is a means for persistent storage within
-  OpenStack.  When an instance is destroyed, any additional data that
-  was on the OS hard drive is lost.  A volume can be thought of
-  similar to an external hard drive.  It can be attached to an
-  instance and accessed as a second drive.  When the instance is
-  destroyed, data on the volume is retained.  It can then be attached
-  and accessed from another instance later.
-
-### Steps for Access
-
-The guide below outlines the steps needed to begin using Anvil.  Please
-note that Anvil is currently in the *beta testing* phase.  While
-reasonable precautions are taken against data loss, **sole copies of
-precious or irreproducible data should not be placed or left on Anvil**.
-
-1.  **Request access to Anvil**
-    Access and resources are provided on a per-group basis, similar to
-    HCC clusters.  For details, please see [What are the per group
-    resource limits?]({{< relref "what_are_the_per_group_resource_limits" >}})
-    To begin using Anvil, user should fill out the short request form
-    at http://hcc.unl.edu/request-anvil-access.
-    An automated confirmation email will be sent. After group owner approves the request, an HCC staff
-    member will follow-up once access is available.
-
-2.  **Create SSH keys**
-    OpenStack uses SSH key pairs to identify users and control access to
-    the VMs themselves, as opposed to the traditional username/password
-    combination.  SSH key pairs consist of two files, a public key and a
-    private key.  The public file can be shared freely; this file will
-    be uploaded to OpenStack and associated with your account.  The
-    private key file should be treated the same as a password.  **Do not
-    share your private key and always keep it in a secure location.**
-     Even if you have previously created a key pair for another purpose,
-    it's best practice to create a dedicated pair for use with Anvil.
-     The process for creating key pairs is different between Windows and
-    Mac.  Follow the relevant guide below for your operating system.
-    1.  [Creating SSH key pairs on Windows]({{< relref "creating_ssh_key_pairs_on_windows" >}})
-    2.  [Creating SSH key pairs on Mac]({{< relref "creating_ssh_key_pairs_on_mac" >}})
-
-3.  **Connect to the Anvil VPN**
-    The Anvil web portal is accessible from the Internet. On the other
-    hand, for security reasons, the Anvil instances are not generally
-    accessible from the Internet. In order to access the instances from
-    on and off-campus, you will need to first be connected to the Anvil
-    VPN. Follow the instructions below to connect.
-    1.  [Connecting to the Anvil VPN]({{< relref "connecting_to_the_anvil_vpn" >}})
-
-4.  **Add the SSH Key Pair to your account**
-    Before creating your first instance, you'll need to associate the
-    SSH key created in step 2 with your account.   Follow the guide
-    below to login to the web dashboard and add the key pair.
-    1.  [Adding SSH Key Pairs]({{< relref "adding_ssh_key_pairs" >}})
-
-5.  **Create an instance**
-    Once the setup steps above are completed, you can create an
-    instance within the web dashboard.  Follow the guide below to create
-    an instance.
-    1.  [Creating an Instance]({{< relref "creating_an_instance" >}})
-
-6.  **Connect to your instance**  
-    After an instance has been created, you can connect (login) and
-    begin to use it.  Connecting is done via SSH or X2Go for Linux
-    instances and via Remote Desktop (RDP) for Windows instances.
-    Follow the relevant guide below for your instance and the type of
-    OS you're connecting from.
-    1.  [Connecting to Windows Instances]({{< relref "connecting_to_windows_instances" >}})
-    2.  [Connecting to Linux Instances via SSH from Mac]({{< relref "connecting_to_linux_instances_from_mac" >}})
-    3.  [Connecting to Linux instances via SSH from Windows]({{< relref "connecting_to_linux_instances_from_windows" >}})
-    4.  [Connecting to Linux instances using X2Go (for images with Xfce)]({{< relref "connecting_to_linux_instances_using_x2go" >}})
-
-7.  **Create and attach a volume to your instance (optional)**
-    Volumes are a means within OpenStack for persistent storage.  When
-    an instance is destroyed, all data that was placed on the OS hard
-    drive is lost.  A volume can be thought of as similar to an external
-    hard drive.  It can be attached to and detached from an instance as
-    needed.  Data on the volume will persist until the volume itself is
-    destroyed.  Creating a volume is an optional step, but may be useful
-    in certain cases.  The process of creating and attaching a volume
-    from the web dashboard is the same regardless of the type (Linux or
-    Windows) of instance it will be attached to.  Once the volume is
-    attached, follow the corresponding guide for your instance's OS to
-    format and make the volume usable within your instance.
-    1.  [Creating and attaching a volume]({{< relref "creating_and_attaching_a_volume" >}})
-    2.  [Formatting and mounting a volume in Windows]({{< relref "formatting_and_mounting_a_volume_in_windows" >}})
-    3.  [Formatting and mounting a volume in Linux]({{< relref "formatting_and_mounting_a_volume_in_linux" >}})
-
-8.  **Transferring files to or from your instance (optional)**
-    Transferring files to or from an instance is similar to doing so
-    with a personal laptop or workstation.  To transfer between an
-    instance and another HCC resource, both SCP and [Globus
-    Connect]({{< relref "guides/handling_data/globus_connect" >}}) can be used.  For transferring
-    between an instance and a laptop/workstation or another instance,
-    standard file sharing utilities such as Dropbox or Box can be used.
-    Globus may also be used, with one stipulation.  In order to
-    transfer files between two personal endpoints, a Globus Plus
-    subscription is required.  As part of HCC's Globus Provider Plan,
-    HCC can provide this on a per-user basis free of charge.  If you are
-    interested in Globus Plus, please email
-    {{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu)
-    with your request and a brief explanation.
-
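-As a quick illustration of step 8, here is a minimal SCP transfer from
-a laptop to a Linux instance.  The key path, username, and instance IP
-below are placeholders: use your own key pair, the default user for
-your image, and the address shown in the web dashboard, and make sure
-you are connected to the Anvil VPN first.
-
-{{< highlight bash >}}
-# copy a local file to the home directory of a Linux instance
-scp -i ~/.ssh/anvil_key data.csv <username>@<instance_ip>:~/
-{{< /highlight >}}
-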
-## Backups
-
-HCC creates daily backups of images and volume snapshots for disaster
-recovery. All users' images, detached volumes, and volume snapshots will
-be backed up on a daily basis. The ephemeral disks of VMs and attached
-volumes will NOT be backed up. If you would like your attached volumes
-to be backed up, make a snapshot: go to the “Volumes” tab, click the
-down arrow next to the “Edit Volume” button for the volume you want to
-snapshot, and then select “Create Snapshot”.
-
-Please note the backup function is for disaster recovery use only. HCC
-is unable to restore single files within instances.  Further, HCC's
-disaster recovery backups should not be the only source of backups for
-important data. The backup policies are subject to change without prior
-notice. To retrieve your backups, please contact HCC. If you have
-special concerns, please contact us at
-{{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu).
-
diff --git a/content/guides/handling_data/globus_connect/activating_hcc_cluster_endpoints.md b/content/guides/handling_data/globus_connect/activating_hcc_cluster_endpoints.md
deleted file mode 100644
index 697916a51fc43441cbb750bb1b544cf89666eb5d..0000000000000000000000000000000000000000
--- a/content/guides/handling_data/globus_connect/activating_hcc_cluster_endpoints.md
+++ /dev/null
@@ -1,39 +0,0 @@
-+++
-title = "Activating HCC Cluster Endpoints"
-description = "How to activate HCC endpoints on Globus"
-weight = 20
-+++
-
-You will not be able to transfer files to or from an HCC endpoint using Globus Connect without first activating the endpoint.  Endpoints are available for Crane (`hcc#crane`), Rhino (`hcc#rhino`), and Attic (`hcc#attic`).  Follow the instructions below to activate any of these endpoints and begin making transfers.
-
-1.  [Sign in](https://www.globus.org/SignIn) to your Globus account using your campus credentials or your Globus ID (if you have one).  Then click on 'Endpoints' in the left sidebar.  
-{{< figure src="/images/Glogin.png" >}}    
-{{< figure src="/images/endpoints.png" >}}
-
-2.  Find the endpoint you want by entering '`hcc#crane`', '`hcc#rhino`', or '`hcc#attic`' in the search box and hit 'enter'.  Once you have found and selected the endpoint, click the green 'activate' icon. On the following page, click 'continue'.
-{{< figure src="/images/activateEndpoint.png" >}}
-{{< figure src="/images/EndpointContinue.png" >}}
-
-3.  You will be redirected to the HCC Globus Endpoint Activation page.  Enter your *HCC* username and password (the password you usually use to log into the HCC clusters).
-{{< figure src="/images/hccEndpoint.png" >}}
-
-4.  Next you will be prompted to
-    provide your *Duo* credentials.  If you use the Duo Mobile app on
-    your smartphone or tablet, select 'Duo Push'.  Once you approve the notification that is sent to your phone,
-    the activation will be complete.  If you use a Yubikey for
-    authentication, select the 'Passcode' option and then press your
-    Yubikey to complete the activation. Upon successful activation, you
-    will be redirected to your Globus *Manage Endpoints* page.  
-{{< figure src="/images/EndpointPush.png" >}}
-{{< figure src="/images/endpointComplete.png" >}}
-
-The endpoint should now be ready
-and will not have to be activated again for the next 7 days.  
-To transfer files between any two HCC clusters, you will need to
-activate both endpoints individually. 
-
-Next, learn how to [make file transfers between HCC endpoints]({{< relref "file_transfers_between_endpoints" >}}) or how to [transfer between HCC endpoints and a personal computer]({{< relref "file_transfers_to_and_from_personal_workstations" >}}).
-
----
-
- 
diff --git a/content/guides/handling_data/globus_connect/file_transfers_between_endpoints.md b/content/guides/handling_data/globus_connect/file_transfers_between_endpoints.md
deleted file mode 100644
index 83ad090c4223c1f4bb9dddd5aeb9b3fdcf8fe381..0000000000000000000000000000000000000000
--- a/content/guides/handling_data/globus_connect/file_transfers_between_endpoints.md
+++ /dev/null
@@ -1,50 +0,0 @@
-+++
-title = "File Transfers Between Endpoints"
-description = "How to transfer files between HCC clusters using Globus"
-weight = 30
-+++
-
-To transfer files between HCC clusters, you will first need to
-[activate]({{< relref "activating_hcc_cluster_endpoints" >}}) the
-two endpoints you would like to use (the available endpoints
-are: `hcc#crane`, `hcc#rhino`, and `hcc#attic`).  Once
-that has been completed, follow the steps below to begin transferring
-files.  (Note: You can also transfer files between an HCC endpoint and
-any other Globus endpoint for which you have authorized access.  That
-may include a [personal
-endpoint]({{< relref "file_transfers_to_and_from_personal_workstations" >}}),
-a [shared
-endpoint]({{< relref "file_sharing" >}}),
-or an endpoint on another computing resource or cluster.  Once the
-endpoints have been activated, the file transfer process is generally
-the same regardless of the type of endpoints you use.  For demonstration
-purposes we use two HCC endpoints.)
-
-1.  Once both endpoints for the desired file transfer have been
-    activated, [sign in](https://www.globus.org/SignIn) to
-    your Globus account (if you are not already) and select
-	"Transfer or Sync to.." from the right sidebar. If you have
-	a small screen, you may have to click the menu icon 
-	first.
-{{< figure src="/images/Transfer.png">}}
-      
-2.  Enter the names of the two endpoints you would like to use, or
-    select from the drop-down menus (for
-    example, `hcc#attic` and `hcc#crane`).  Enter the
-    directory paths for both the source and destination (the 'from' and
-    'to' paths on the respective endpoints). Press 'Enter' to view files
-    under these directories.  Select the files or directories you would
-    like to transfer (press *shift* or *control* to make multiple
-    selections) and click the blue highlighted arrow to start the
-    transfer.  
-{{< figure src="/images/startTransfer.png" >}}
-    
-3.  Globus will display a message when your transfer has completed 
-	(or in the unlikely event that it was unsuccessful), and you will 
-	also receive an email. Select the 'refresh' icon to see your file
-	in the destination folder.
-{{< figure src="/images/transferComplete.png" >}}      
-
---- 
-
-
diff --git a/content/guides/handling_data/high_speed_data_transfers.md b/content/guides/handling_data/high_speed_data_transfers.md
deleted file mode 100644
index 922eb4b43902030f7a09eeb1f96332fea2bbe294..0000000000000000000000000000000000000000
--- a/content/guides/handling_data/high_speed_data_transfers.md
+++ /dev/null
@@ -1,28 +0,0 @@
-+++
-title = "High Speed Data Transfers"
-description = "How to transfer files directly from the transfer servers"
-weight = 10
-+++
-
-Crane, Rhino, and Attic each have a dedicated transfer server with
-10 Gb/s connectivity that allows
-for faster data transfers than the login nodes.  With [Globus
-Connect]({{< relref "globus_connect" >}}), users
-can take advantage of this connection speed when making large/cumbersome
-transfers.
-
-Those who prefer scp, sftp or
-rsync clients can also benefit from this high-speed connectivity by
-using these dedicated servers for data transfers:
-
-Cluster   | Transfer server
-----------|----------------------
-Crane     | `crane-xfer.unl.edu`
-Rhino     | `rhino-xfer.unl.edu`
-Attic     | `attic-xfer.unl.edu`
-
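-For example, a minimal `scp` upload through Crane's transfer server
-(the remote path below is a placeholder for your own `$WORK` directory):
-
-{{< highlight bash >}}
-# copy a local file to /work via the dedicated high-speed server
-scp large_dataset.tar.gz <username>@crane-xfer.unl.edu:/work/<group>/<username>/
-{{< /highlight >}}
-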
-{{% notice info %}}
-Because the transfer servers are login-disabled, third-party transfers
-between `crane-xfer` and `attic-xfer` must be done via [Globus Connect]({{< relref "globus_connect" >}}).
-{{% /notice %}}
-
diff --git a/content/guides/handling_data/preventing_file_loss.md b/content/guides/handling_data/preventing_file_loss.md
deleted file mode 100644
index 8d20435f97caebbde64fc7aa792db534747d73ee..0000000000000000000000000000000000000000
--- a/content/guides/handling_data/preventing_file_loss.md
+++ /dev/null
@@ -1,170 +0,0 @@
-+++
-title = "Preventing File Loss"
-description = "How to prevent file loss on HCC clusters"
-weight = 60
-+++
-
-Each research group is allocated 50TB of storage in `/work` on HCC
-clusters. With over 400 active groups, HCC does not have the resources
-to provide regular backups of `/work` without sacrificing the
-performance of the existing filesystem. No matter how careful a user
-might be, there is always the risk of file loss due to user error,
-natural disasters, or equipment failure.  
-  
-However, there are a number of solutions available for backing up your
-data. By carefully considering the benefits and limitations of each,
-users can select the backup methods that work best for their particular
-needs. For truly robust file backups, we recommend combining multiple
-methods. For example, use Git regularly along with manual backups to an
-external hard drive at regular intervals, such as monthly or biannually.
-
----
-### 1. Use your local machine:
-
-If you have sufficient hard drive space, regularly back up your `/work`
-directories to your personal computer. To avoid filling up your personal
-hard drives, consider using an external drive that can easily be placed
-in a fireproof safe or at an off-site location for an extra level of
-protection. To do this, you can either use [Globus
-Connect]({{< relref "globus_connect" >}}) or an
-SCP client, such
-as <a href="https://cyberduck.io/" class="external-link">Cyberduck</a> or <a href="https://winscp.net/eng/index.php" class="external-link">WinSCP</a>.
-For help setting up an SCP client, check out our [Quick Start
-Guides]({{< relref "/quickstarts" >}}).
-  
-For those worried about personal hard drive crashes, UNL
-offers <a href="http://nsave.unl.edu/" class="external-link">the backup service NSave</a>.
-For a small monthly fee, users can install software that will
-automatically backup selected files from their personal machine.  
-  
-Benefits:
-
--   Gives you full control over what is backed up and when.
--   Doesn't require the use of third party servers (when using SCP
-    clients).
--   Take advantage of our high speed data transfers (10 Gb/s) when using
-    Globus Connect, or [set up your SCP client to use our dedicated high
-    speed transfer
-    servers]({{< relref "high_speed_data_transfers" >}}).
-
-Limitations:
-
--   The amount you can back up is limited by available hard-drive space.
--   Manual backups of many files can be time-consuming.
-
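-As a rough sketch, a manual pull of a project directory to a local
-machine with `rsync` (the paths below are placeholders):
-
-{{< highlight bash >}}
-# run on your local machine; mirrors a /work project into ~/backups
-rsync -avz <username>@crane-xfer.unl.edu:/work/<group>/<username>/project ~/backups/
-{{< /highlight >}}
-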
----
-### 2. Use Git to preserve files and revision history:
-
-Git is a revision control service which can be run locally or can be
-paired with a repository hosting service, such
-as <a href="http://www.github.com/" class="external-link">GitHub</a>, to
-provide a remote backup of your files. Git works best with smaller files
-such as source code and manuscripts. Anyone with an InCommon login can
-utilize <a href="http://git.unl.edu/" class="external-link">UNL's GitLab Instance</a>,
-for free.  
-  
-Benefits:
-
--   Git is naturally collaboration-friendly: it allows multiple people
-    to work on the same project easily and provides great built-in tools
-    for controlling contributions and managing conflicting changes.
--   Create individual repositories for each project, allowing you to
-    compartmentalize your work.
--   Using UNL's GitLab instance allows you to create private or internal
-    (accessible by anyone within your organization) repositories.
-
-Limitations:
-
--   Git is not designed to handle large files. GitHub does not allow
-    files larger than 100MB unless you use
-    their <a href="https://help.github.com/articles/about-git-large-file-storage/" class="external-link">Git Large File Storage</a>, and
-    tracking files over 1GB in size can be time-consuming and lead to
-    errors when using other repository hosts.
-
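-As a minimal workflow sketch (the repository URL below is hypothetical;
-create the project through the GitLab web interface first):
-
-{{< highlight bash >}}
-cd $WORK/<project_folder>
-git init
-git add scripts/ manuscript.tex
-git commit -m "Back up analysis scripts and manuscript"
-git remote add origin git@git.unl.edu:<username>/<project>.git
-git push -u origin master
-{{< /highlight >}}
-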
----
-### 3. Use Attic:
-
-HCC offers
-long-term, <a href="https://en.wikipedia.org/wiki/Nearline_storage" class="external-link">near-line</a> data
-storage
-through [Attic]({{< relref "using_attic" >}}).
-HCC users with an existing account
-can <a href="http://hcc.unl.edu/attic" class="external-link">apply for an Attic account</a> for
-a <a href="http://hcc.unl.edu/priority-access-pricing" class="external-link">small annual fee</a> that
-is substantially less than other cloud services.  
-  
-Benefits:
-
--   Attic files are backed up regularly at both HCC locations in Omaha
-    and Lincoln to help provide disaster tolerance and a second security
-    layer against file loss.
--   No limits on individual or total file sizes.
--   High speed data transfers between Attic and the clusters when using
-    [Globus Connect]({{< relref "globus_connect" >}}) and [HCC's high-speed data
-    servers]({{< relref "high_speed_data_transfers" >}}).
-
-Limitations:
-
--   Backups must be done manually, which can be time-consuming. Setting
-    up automated scripts can help speed up this process.
-
----
-### 4. Use a cloud-based service, such as Box:
-
-Many of us are familiar with services such as Google Drive, Dropbox, Box
-and OneDrive. These cloud-based services provide a convenient portal for
-accessing your files from any computer. NU offers OneDrive and Box
-services to all students, staff and faculty. But did you know that you
-can link your Box account to HCC’s clusters to provide quick and easy
-access to files stored there?  [Follow a few set-up
-steps]({{< relref "integrating_box_with_hcc" >}}) and
-you can add files to and access files stored in your Box account
-directly from HCC clusters. Set up your submit scripts to automatically
-upload results as they are generated or use it interactively to store
-important workflow scripts and maintain a backup of your analysis
-results.  
-  
-Benefits:
-
--   <a href="http://box.unl.edu/" class="external-link">Box@UNL</a> offers
-    unlimited file storage while you are associated with UNL.
--   Integrating with HCC clusters provides a quick and easy way to
-    automate backups of analysis results and workflow scripts.
-
-Limitations:
-
--   Box has individual file size limitations; larger files will need to
-    be backed up using an alternate method.
-
----
-### 5. Copy important files to `/home`:
-
-While `/work` files and directories are not backed up, files and
-directories in `/home` are backed up on a daily basis. Due to the
-limitations of the `/home` filesystem, we strongly recommend that only
-source code and compiled programs are backed up to `/home`. If you do
-use `/home` to backup datasets, please keep a working copy in your
-`/work` directories to prevent negatively impacting the functionality of
-the cluster.  
-  
-Benefits:
-
--   No need to make manual backups. `/home` files are automatically backed
-    up daily.
--   Files in `/home` are not subject to the 6 month purge policy that
-    exists on `/work`.
--   Doesn't require the use of third-party software or tools.
-
-Limitations:
-
--   Home storage is limited to 20GB per user. Larger file sets will
-    need to be backed up using an alternate method.
--   Home is read-only on the cluster worker nodes so results cannot be
-    directly written or altered from within a submitted job.
-
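-As a simple illustration, copying source code into a dated backup
-directory under `/home` (the paths below are placeholders):
-
-{{< highlight bash >}}
-# run from a login node; /home is read-only on the worker nodes
-mkdir -p $HOME/backups/$(date +%F)
-cp -r $WORK/<project_folder>/src $HOME/backups/$(date +%F)/
-{{< /highlight >}}
-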
-  
-If you would like more information or assistance in setting up any of
-these methods, contact us
-at <a href="mailto:hcc-support@unl.edu" class="external-link">hcc-support@unl.edu</a>. 
-
-
diff --git a/content/guides/handling_data/using_attic.md b/content/guides/handling_data/using_attic.md
deleted file mode 100644
index dc1c89a3e84cb10b31b0723cb5e6a605a864d556..0000000000000000000000000000000000000000
--- a/content/guides/handling_data/using_attic.md
+++ /dev/null
@@ -1,105 +0,0 @@
-+++
-title = "Using Attic"
-description = "How to store data on Attic"
-weight = 20
-+++
-
-For users who need long-term storage for large amounts of data, HCC
-provides an economical solution called Attic.  Attic is a reliable
-<a href="https://en.wikipedia.org/wiki/Nearline_storage" class="external-link">near-line data archive</a> storage
-system. The files in Attic can be accessed and shared from anywhere
-using [Globus
-Connect]({{< relref "globus_connect" >}}),
-with a fast 10Gb/s link.  Also, the data in Attic is backed up between
-our Lincoln and Omaha facilities to ensure high availability and
-disaster tolerance. The data and user activities on Attic are subject to
-our
-<a href="http://hcc.unl.edu/hcc-policies" class="external-link">HCC Policies</a>.
-
----
-### Accounts and Cost
-
-To use Attic you will first need an
-<a href="https://hcc.unl.edu/new-user-request" class="external-link">HCC account</a>, and
-then you may request an
-<a href="http://hcc.unl.edu/attic" class="external-link">Attic allocation</a>.
-We charge a small fee per TB per year, but it is cheaper than most
-commercial cloud storage solutions.  For the user application form and
-cost, please see the
-<a href="http://hcc.unl.edu/attic" class="external-link">HCC Attic page</a>.
-
----
-### Transfer Files Using Globus Connect
-
-The easiest and fastest way to access Attic is via Globus. You can
-transfer files between your computer, our clusters ($HOME, $WORK, and $COMMON on
-Crane or Rhino), and Attic. Here is a detailed tutorial on
-how to set up and use [Globus Connect]({{< relref "globus_connect" >}}). For
-Attic, use the Globus Endpoint **hcc#attic**.  Your Attic files are
-located at `~`, which is a shortcut
-for `/attic/<groupname>/<username>`.  
-**Note:** *If you are accessing Attic files from your supplementary
-group, you should explicitly set the path to
-`/attic/<supplementary_groupname>/`. If you don't do that, by
-default the endpoint will try to place you in your primary group's Attic
-path, to which access will be denied if the primary group doesn't have an Attic allocation.*
-
----
-### Transfer Files Using SCP/SFTP/RSYNC
-
-The transfer server for Attic storage is `attic.unl.edu` (or `attic-xfer.unl.edu`).
-
-{{% panel theme="info" header="SCP Example" %}}
-{{< highlight bash >}}
-scp /source/file <username>@attic.unl.edu:~/destination/file
-{{< /highlight >}}
-{{% /panel %}}
-
-{{% panel theme="info" header="SFTP Example" %}}
-{{< highlight bash >}}
-sftp <username>@attic.unl.edu
-Password:
-Duo two-factor login for <username>
-Connected to attic.unl.edu.
-sftp> pwd
-Remote working directory: /attic/<groupname>/<username>
-sftp> put source/file destination/file
-sftp> exit
-{{< /highlight >}}
-{{% /panel %}}
-
-{{% panel theme="info" header="RSYNC Example" %}}
-{{< highlight bash >}}
-# local to remote rsync command
-rsync -avz /local/source/path <username>@attic.unl.edu:remote/destination/path
-
-# remote to local rsync command
-rsync -avz <username>@attic.unl.edu:remote/source/path /local/destination/path
-{{< /highlight >}}
-{{% /panel %}}
-
-You can also access your data on Attic using our [high-speed
-transfer servers]({{< relref "high_speed_data_transfers" >}}) if you prefer.
-Simply use scp or sftp to connect to one of the transfer servers, and
-your directory is mounted at `/attic/<groupname>/<username>`.
-
----
-### Check Attic Usage
-
-The usage and quota information for your group and the users in the
-group are stored in a file named `disk_usage.txt` in your group's
-directory (`/attic/<groupname>`). You can use either [Globus Connect]({{< relref "globus_connect" >}}) or
-scp to download it.  Your usage and expiration are also shown in the web
-interface (see below).
-
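-For example, downloading the usage report with `scp`:
-
-{{< highlight bash >}}
-scp <username>@attic.unl.edu:/attic/<groupname>/disk_usage.txt .
-{{< /highlight >}}
-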
----
-### Use the web interface
-
-For convenience, a web interface is also provided.  Simply go to
-<a href="https://attic.unl.edu" class="external-link">https://attic.unl.edu</a>
-and login with your HCC credentials.  Using this interface, you can see
-your quota usage and expiration, manage files, etc.  **Please note we do
-not recommend uploading/downloading large files this way**.  Use one of
-the other transfer methods above for large datasets.
-
-
diff --git a/content/guides/running_applications/Jupyter.md b/content/guides/running_applications/Jupyter.md
deleted file mode 100644
index 442133957028bc65e34a1beefac28eb30ea0cc3b..0000000000000000000000000000000000000000
--- a/content/guides/running_applications/Jupyter.md
+++ /dev/null
@@ -1,58 +0,0 @@
-+++
-title = "Jupyter Notebooks on Crane"
-description = "How to access and use a Jupyter Notebook"
-weight = 20
-+++
-
-- [Connecting to Crane](#connecting-to-crane)
-- [Running Code](#running-code)
-- [Opening a Terminal](#opening-a-terminal)
-- [Using Custom Packages](#using-custom-packages)
-
-## Connecting to Crane
------------------------
-The Jupyter project describes its notebooks ("Jupyter Notebooks") as
-	"an open-source web application that allows you to create and share documents that contain live code,
-	equations, visualizations and narrative text." Uses include data cleaning and transformation, numerical simulation,
-	statistical modeling, data visualization, machine learning, and much more.
-
-1.  To open a Jupyter notebook, [Sign in](https://crane.unl.edu) to crane.unl.edu using your HCC credentials (NOT your 
-	campus credentials).
-{{< figure src="/images/jupyterLogin.png" >}}
-
-2.	Select your preferred authentication method.
-
-	{{< figure src="/images/jupyterPush.png" >}}  
-
-3.	Choose a job profile. Select "Notebook via SLURM Job | Small (1 core, 4GB RAM, 8 hours)" for light tasks such as debugging or small-scale testing.
-Select the other options based on your computing needs. Note that a notebook started via a SLURM job saves its files to your `work` directory.
-
-{{< figure src="/images/jupyterjob.png" >}}
-
-## Running Code
-
-1.  Select the "New" dropdown menu and select the file type you want to create.   
-
-{{< figure src="/images/jupyterNew.png" >}}
-2.	A new tab will open, where you can enter your code. Run your code by selecting the "play" icon.
-
-{{< figure src="/images/jupyterCode.png">}}
-
-## Opening a Terminal
-
-1.	From your user home page, select "terminal" from the "New" drop-down menu.
-{{< figure src="/images/jupyterTerminal.png">}}
-2.	A terminal opens in a new tab. You can enter [Linux commands]({{< relref "basic_linux_commands" >}})
- at the prompt.
-{{< figure src="/images/jupyterTerminal2.png">}}
-
-## Using Custom Packages
-
-Many popular `python` and `R` packages are already installed and available within Jupyter Notebooks. 
-However, it is possible to install custom packages to be used in notebooks by creating a custom Anaconda 
-Environment. Detailed information on how to create such an environment can be found at
- [Using an Anaconda Environment in a Jupyter Notebook on Crane]({{< relref "using_anaconda_package_manager/#using-an-anaconda-environment-in-a-jupyter-notebook-on-crane" >}}).
-
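-As a rough sketch of that process (the module name, environment name,
-and package list below are illustrative; the linked guide is
-authoritative):
-
-{{< highlight bash >}}
-# from a terminal on Crane: create a conda environment for use in notebooks
-module load anaconda
-conda create -n mynotebookenv python=3 numpy pandas ipykernel
-{{< /highlight >}}
-
-Once created, the environment should then be selectable as a kernel
-when starting a new notebook.
-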
----
-
- 
diff --git a/content/guides/running_applications/_index.md b/content/guides/running_applications/_index.md
deleted file mode 100644
index e4962e0a4c9104fb1eaa3afa4bd242c297abc8d2..0000000000000000000000000000000000000000
--- a/content/guides/running_applications/_index.md
+++ /dev/null
@@ -1,7 +0,0 @@
-+++
-title = "Running Applications"
-description = "How to run various applications on HCC resources."
-weight = "20"
-+++
-
-{{% children %}}
diff --git a/content/guides/running_applications/allinea_profiling_and_debugging/allinea_performance_reports/blast_with_allinea_performance_reports.md b/content/guides/running_applications/allinea_profiling_and_debugging/allinea_performance_reports/blast_with_allinea_performance_reports.md
deleted file mode 100644
index ae01912ee4795c0040d891b2c657c38e927c7148..0000000000000000000000000000000000000000
--- a/content/guides/running_applications/allinea_profiling_and_debugging/allinea_performance_reports/blast_with_allinea_performance_reports.md
+++ /dev/null
@@ -1,65 +0,0 @@
-+++
-title = "BLAST with Allinea Performance Reports"
-description = "Example of how to profile BLAST using Allinea Performance Reports."
-+++
-
-A simple example of using
-[BLAST]({{< relref "/guides/running_applications/bioinformatics_tools/alignment_tools/blast/running_blast_alignment" >}}) 
-with Allinea Performance Reports (`perf-report`) on Crane is shown below:
-
-{{% panel theme="info" header="blastn_perf_report.submit" %}}
-{{< highlight batch >}}
-#!/bin/sh
-#SBATCH --job-name=BlastN
-#SBATCH --nodes=1
-#SBATCH --ntasks=16
-#SBATCH --time=20:00:00
-#SBATCH --mem=50gb
-#SBATCH --output=BlastN.info
-#SBATCH --error=BlastN.error
-
-module load allinea
-module load blast/2.2.29
-
-cd $WORK/<project_folder>
-cp -r /work/HCC/DATA/blastdb/nt/ /tmp/
-cp input_reads.fasta /tmp/
-
-perf-report --openmp-threads=$SLURM_NTASKS_PER_NODE --nompi `which blastn` \
--query /tmp/input_reads.fasta -db /tmp/nt/nt -out \
-blastn_output.alignments -num_threads $SLURM_NTASKS_PER_NODE
-
-cp blastn_output.alignments .
-{{< /highlight >}}
-{{% /panel %}}
-
-BLAST uses OpenMP and therefore the Allinea Performance Reports options
-`--openmp-threads` and `--nompi` are used. The perf-report
-part, `perf-report --openmp-threads=$SLURM_NTASKS_PER_NODE --nompi`,
-is placed in front of the actual `blastn` command we want
-to analyze.
-
-{{% notice info %}}
-If you see the error "**Allinea Performance Reports - target file
-'application' does not exist on this machine... exiting**", this means
-that instead of just using the executable '*application*', the full path
-to that application is required. This is the reason why in the script
-above, instead of using "*blastn*", we use `which blastn`, which
-gives the full path of the *blastn* executable.
-{{% /notice %}}
-
-When the application finishes, the performance report is generated in
-the working directory.
-For the executed application, this is how the report looks:
-
-{{< figure src="/images/11635296.png" width="850" >}}
-
-From the report, we can see that **blastn** is a compute-bound
-application. The difference between mean (11.1 GB) and peak (26.3 GB)
-memory is significant, and this may be a sign of workload imbalance or a
-memory leak. Moreover, 89.6% of the time is spent in synchronizing
-threads in parallel regions, which can lead to workload imbalance.
-
-Running Allinea Performance Reports and identifying application
-bottlenecks is really useful for improving the application and better
-utilization of the available resources.
diff --git a/content/guides/running_applications/allinea_profiling_and_debugging/allinea_performance_reports/ray_with_allinea_performance_reports.md b/content/guides/running_applications/allinea_profiling_and_debugging/allinea_performance_reports/ray_with_allinea_performance_reports.md
deleted file mode 100644
index 09fab74bcf78dd57f6f5d8213bb20d9a716adfcf..0000000000000000000000000000000000000000
--- a/content/guides/running_applications/allinea_profiling_and_debugging/allinea_performance_reports/ray_with_allinea_performance_reports.md
+++ /dev/null
@@ -1,44 +0,0 @@
-+++
-title = "Ray with Allinea Performance Reports"
-description = "Example of how to profile Ray using Allinea Performance Reports"
-+++
-
-A simple example of using [Ray]({{< relref "/guides/running_applications/bioinformatics_tools/de_novo_assembly_tools/ray" >}})
-with Allinea Performance Reports (`perf-report`) on Tusker is shown below:
-
-{{% panel theme="info" header="ray_perf_report.submit" %}}
-{{< highlight batch >}}
-#!/bin/sh
-#SBATCH --job-name=Ray
-#SBATCH --ntasks-per-node=16
-#SBATCH --time=10:00:00
-#SBATCH --mem=70gb
-#SBATCH --output=Ray.info
-#SBATCH --error=Ray.error
-
-module load allinea
-module load compiler/gcc/4.7 openmpi/1.6 ray/2.3
-
-perf-report mpiexec -n 16 Ray -k 31 -p input_reads_pair_1.fasta input_reads_pair_2.fasta -o output_directory
-{{< /highlight >}}
-{{% /panel %}}
-
-Ray is an MPI application and therefore no additional Allinea
-Performance Reports options are required. The `perf-report` command is
-placed in front of the actual `Ray` command we want to analyze.
-
-When the application finishes, the performance report is generated in
-the working directory.
-For the executed application, this is how the report looks:
-
-{{< figure src="/images/11635303.png" width="850" >}}
-
-From the report, we can see that **Ray** is a compute-bound application.
-Most of the running time is spent in point-to-point calls with a low
-transfer rate which may be caused by inefficient message sizes.
-Therefore, running this application with fewer MPI processes and more
-data on each process may be more efficient.
-
-Running Allinea Performance Reports and identifying application
-bottlenecks is really useful for improving the application and better
-utilization of the available resources.
diff --git a/content/guides/running_applications/bioinformatics_tools/alignment_tools/blast/running_blast_alignment.md b/content/guides/running_applications/bioinformatics_tools/alignment_tools/blast/running_blast_alignment.md
deleted file mode 100644
index 4024fe76fff77a3f8b5e21ef732fad3c8297e642..0000000000000000000000000000000000000000
--- a/content/guides/running_applications/bioinformatics_tools/alignment_tools/blast/running_blast_alignment.md
+++ /dev/null
@@ -1,124 +0,0 @@
-+++
-title = " Running BLAST Alignment"
-description =  "How to run BLAST alignment on HCC resources"
-weight = "10"
-+++
-
-
-Basic BLAST has the following commands:
-
-- **blastn**: search nucleotide database using a nucleotide query
-- **blastp**: search protein database using a protein query
-- **blastx**: search protein database using a translated nucleotide query
-- **tblastn**: search translated nucleotide database using a protein query
-- **tblastx**: search translated nucleotide database using a translated nucleotide query
-
-
-The basic usage of **blastn** is:
-{{< highlight bash >}}
-$ blastn -query input_reads.fasta -db input_reads_db -out blastn_output.alignments [options]
-{{< /highlight >}}
-where **input_reads.fasta** is an input file of sequence data in fasta format, **input_reads_db** is the generated BLAST database, and **blastn_output.alignments** is the output file where the alignments are stored.
-
-Additional parameters can be found in the [BLAST manual](https://www.ncbi.nlm.nih.gov/books/NBK279690/), or by typing:
-{{< highlight bash >}}
-$ blastn -help
-{{< /highlight >}}
-
-These BLAST alignment commands are multi-threaded, and therefore using the BLAST option **-num_threads <number_of_CPUs>** is recommended.
-
-
-HCC hosts multiple BLAST databases and indices on Crane. In order to use these resources, the ["biodata" module]({{<relref "/guides/running_applications/bioinformatics_tools/biodata_module">}}) needs to be loaded first. The **$BLAST** variable contains the following currently available databases:
-
-- **16SMicrobial**
-- **env_nt**
-- **est**
-- **est_human**
-- **est_mouse**
-- **est_others**
-- **gss**
-- **human_genomic**
-- **human_genomic_transcript**
-- **mouse_genomic_transcript**
-- **nr**
-- **nt**
-- **other_genomic**
-- **refseq_genomic**
-- **refseq_rna**
-- **sts**
-- **swissprot**
-- **tsa_nr**
-- **tsa_nt**
-
-If you want to create and use a BLAST database that is not mentioned above, check [Create Local BLAST Database]({{<relref "create_local_blast_database" >}}).
-
-
-A basic SLURM example of a nucleotide BLAST run against the non-redundant **nt** BLAST database with `8 CPUs` is provided below. When running BLAST alignment, it is recommended to first copy the query and database files to the **/scratch/** directory of the worker node. Moreover, the BLAST output is also saved in this directory (**/scratch/blastn_output.alignments**). After BLAST finishes, the output file is copied from the worker node to your current work directory.
-{{% notice info %}}
-**Please note that the worker nodes can not write to the */home/* directories and therefore you need to run your job from your */work/* directory.**
-**This example will first copy your database to faster local storage called “scratch”.  This can greatly improve performance!**
-{{% /notice %}}
-
-{{% panel header="`blastn_alignment.submit`"%}}
-{{< highlight bash >}}
-#!/bin/sh
-#SBATCH --job-name=BlastN
-#SBATCH --nodes=1
-#SBATCH --ntasks-per-node=8
-#SBATCH --time=168:00:00
-#SBATCH --mem=20gb
-#SBATCH --output=BlastN.%J.out
-#SBATCH --error=BlastN.%J.err
-
-module load blast/2.7
-module load biodata/1.0
-
-cd $WORK/<project_folder>
-cp $BLAST/nt.* /scratch/
-cp input_reads.fasta /scratch/
-
-blastn -query /scratch/input_reads.fasta -db /scratch/nt -out /scratch/blastn_output.alignments -num_threads $SLURM_NTASKS_PER_NODE
-
-cp /scratch/blastn_output.alignments $WORK/<project_folder>
-{{< /highlight >}}
-{{% /panel %}}
-
-
-One important BLAST parameter is the **e-value threshold** that changes the number of hits returned by showing only those with an e-value lower than the given threshold. To show the hits with **e-value** lower than 1e-10, modify the given script as follows:
-{{< highlight bash >}}
-$ blastn -query input_reads.fasta -db input_reads_db -out blastn_output.alignments -num_threads $SLURM_NTASKS_PER_NODE -evalue 1e-10
-{{< /highlight >}}
-
-
-The default BLAST output is in pairwise format. However, BLAST’s parameter **-outfmt** supports output in [different formats](https://www.ncbi.nlm.nih.gov/books/NBK279684/) that are easier for parsing.
-
-
-A basic SLURM example of a protein BLAST run against the non-redundant **nr** BLAST database with tabular output format and `8 CPUs` is shown below. As before, the query and database files are copied to the **/scratch/** directory. The BLAST output is also saved in this directory (**/scratch/blastx_output.alignments**). After BLAST finishes, the output file is copied from the worker node to your current work directory.
-{{% notice info %}}
-**Please note that the worker nodes can not write to the */home/* directories and therefore you need to run your job from your */work/* directory.**
-**This example will first copy your database to faster local storage called “scratch”.  This can greatly improve performance!**
-{{% /notice %}}
-
-{{% panel header="`blastx_alignment.submit`"%}}
-{{< highlight bash >}}
-#!/bin/sh
-#SBATCH --job-name=BlastX
-#SBATCH --nodes=1
-#SBATCH --ntasks-per-node=8
-#SBATCH --time=168:00:00
-#SBATCH --mem=20gb
-#SBATCH --output=BlastX.%J.out
-#SBATCH --error=BlastX.%J.err
-
-module load blast/2.7
-module load biodata/1.0
-
-cd $WORK/<project_folder>
-cp $BLAST/nr.* /scratch/
-cp input_reads.fasta /scratch/
-
-blastx -query /scratch/input_reads.fasta -db /scratch/nr -outfmt 6 -out /scratch/blastx_output.alignments -num_threads $SLURM_NTASKS_PER_NODE
-
-cp /scratch/blastx_output.alignments $WORK/<project_folder>
-{{< /highlight >}}
-{{% /panel %}}
diff --git a/content/guides/running_applications/bioinformatics_tools/biodata_module.md b/content/guides/running_applications/bioinformatics_tools/biodata_module.md
deleted file mode 100644
index 9b9ca690ffa288040d85d28756c011da2981f32f..0000000000000000000000000000000000000000
--- a/content/guides/running_applications/bioinformatics_tools/biodata_module.md
+++ /dev/null
@@ -1,88 +0,0 @@
-+++
-title = "Biodata Module"
-description = "How to use Biodata Module on HCC machines"
-scripts = ["https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/js/jquery.tablesorter.min.js", "https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/js/widgets/widget-pager.min.js","https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/js/widgets/widget-filter.min.js","/js/sort-table.js"]
-css = ["http://mottie.github.io/tablesorter/css/theme.default.css","https://mottie.github.io/tablesorter/css/theme.dropbox.css", "https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/css/jquery.tablesorter.pager.min.css","https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/css/filter.formatter.min.css"]
-weight = "52"
-+++
-
-
-HCC hosts multiple databases (BLAST, KEGG, PANTHER, InterProScan), genome files, short read aligned indices etc. on Crane.  
-In order to use these resources, the "**biodata**" module needs to be loaded first.  
-For information on how to load modules, please check [Module Commands]({{< relref "module_commands" >}}).
-
-Loading the "**biodata**" module will pre-set many environment variables, but most likely you will only need a subset of them. Environment variables can be used in your command or script by prefixing `$` to the name.
-
-The major environment variables are:  
-**$DATA** - main directory  
-**$BLAST** - Directory containing all available BLAST (nucleotide and protein) databases  
-**$KEGG** - KEGG database main entry point (requires license)  
-**$PANTHER** - PANTHER database main entry point (latest)  
-**$IPR** - InterProScan database main entry point (latest)  
-**$GENOMES** - Directory containing all available genomes (multiple sources and builds possible)  
-**$INDICES** - Directory containing indices for bowtie, bowtie2, bwa for all available genomes  
-**$UNIPROT** - Directory containing latest release of full UniProt database
-
-
-In order to check what genomes are available, you can type:
-{{< highlight bash >}}
-$ ls $GENOMES
-{{< /highlight >}}
-
-
-In order to check what BLAST databases are available, you can just type:
-{{< highlight bash >}}
-$ ls $BLAST
-{{< /highlight >}}
-
-
-An example of how to run Bowtie2 local alignment on Crane utilizing the default Horse, *Equus caballus* index (*BOWTIE2\_HORSE*) with paired-end fasta files and 8 CPUs is shown below:
-{{% panel header="`bowtie2_alignment.submit`"%}}
-{{< highlight bash >}}
-#!/bin/sh
-#SBATCH --job-name=Bowtie2
-#SBATCH --nodes=1
-#SBATCH --ntasks-per-node=8
-#SBATCH --time=168:00:00
-#SBATCH --mem=10gb
-#SBATCH --output=Bowtie2.%J.out
-#SBATCH --error=Bowtie2.%J.err
-
-module load bowtie/2.2
-module load biodata
-
-bowtie2 -x $BOWTIE2_HORSE -f -1 input_reads_pair_1.fasta -2 input_reads_pair_2.fasta -S bowtie2_alignments.sam --local -p $SLURM_NTASKS_PER_NODE
-
-{{< /highlight >}}
-{{% /panel %}}
-
-
-An example of BLAST run against the non-redundant nucleotide database available on Crane is provided below:
-{{% panel header="`blastn_alignment.submit`"%}}
-{{< highlight bash >}}
-#!/bin/sh
-#SBATCH --job-name=BlastN
-#SBATCH --nodes=1
-#SBATCH --ntasks-per-node=8
-#SBATCH --time=168:00:00
-#SBATCH --mem=10gb
-#SBATCH --output=BlastN.%J.out
-#SBATCH --error=BlastN.%J.err
-
-module load blast/2.7
-module load biodata
-cp $BLAST/nt.* /scratch
-cp input_reads.fasta /scratch
-
-blastn -db /scratch/nt -query /scratch/input_reads.fasta -out /scratch/blast_nucleotide.results
-cp /scratch/blast_nucleotide.results .
-
-{{< /highlight >}}
-{{% /panel %}}
-
-
-### Available Organisms
-
-The organisms and their appropriate environmental variables for all genomes and chromosome files, as well as indices are shown in the table below.
-
-{{< table url="http://rhino-head.unl.edu:8192/bio/data/json" >}}
diff --git a/content/guides/running_applications/dmtcp_checkpointing.md b/content/guides/running_applications/dmtcp_checkpointing.md
deleted file mode 100644
index 5fca1a4c7816922840533973167b677822aa44a1..0000000000000000000000000000000000000000
--- a/content/guides/running_applications/dmtcp_checkpointing.md
+++ /dev/null
@@ -1,138 +0,0 @@
-+++
-title = "DMTCP Checkpointing"
-description = "How to use the DMTCP utility to checkpoint your application."
-+++
-
-[DMTCP](http://dmtcp.sourceforge.net)
-(Distributed MultiThreaded Checkpointing) is a checkpointing package for
-applications. Using checkpointing allows resuming of a failing
-simulation due to failing resources (e.g. hardware, software, exceeded
-time and memory resources).
-
-DMTCP supports both sequential and multi-threaded applications. Some
-examples of binary programs on Linux distributions that can be used with
-DMTCP are OpenMP, MATLAB, Python, Perl, MySQL, bash, gdb, X-Windows etc.
-
-DMTCP provides support for several resource managers, including SLURM,
-the resource manager used in HCC. The DMTCP module is available on
-Crane and is enabled by typing:
-
-{{< highlight bash >}}
-module load dmtcp
-{{< /highlight >}}
-  
-After the module is loaded, the first step is to run the command:
-
-{{< highlight bash >}}
-[<username>@login.crane ~]$ dmtcp_launch --new-coordinator --rm --interval <interval_time_seconds> <your_command>
-{{< /highlight >}}
-
-where `--rm` option enables SLURM support,
-**\<interval_time_seconds\>** is the time in seconds between
-automatic checkpoints, and **\<your_command\>** is the actual
-command you want to run and checkpoint.
-
-Beside the general options shown above, more `dmtcp_launch` options
-can be seen by using:
-
-{{< highlight bash >}}
-[<username>@login.crane ~]$ dmtcp_launch --help
-{{< /highlight >}}
-
-`dmtcp_launch` creates a few files that are used to resume the
-cancelled job, such as `ckpt_*.dmtcp` and
-`dmtcp_restart_script*.sh`. Unless otherwise stated
-(using the `--ckptdir` option), these files are stored in the current
-working directory.
-
-  
-The second step of DMTCP is to restart the cancelled job, and there are
-two ways of doing that:
-
--   `dmtcp_restart ckpt_*.dmtcp` `<options>` (before running
-    this command, delete any old `ckpt_*.dmtcp` files in your current
-    directory)
-
--   `./dmtcp_restart_script.sh` `<options>`
-
-If there are no options defined in the `<options>` field, DMTCP
-will keep running with the options defined in the initial
-`dmtcp_launch` call (such as interval time, output directory etc).
-
-  
-A simple example of using DMTCP with
-[BLAST]({{< relref "/guides/running_applications/bioinformatics_tools/alignment_tools/blast/running_blast_alignment" >}})
-on Crane is shown below:
-
-{{% panel theme="info" header="dmtcp_blastx.submit" %}}
-{{< highlight batch >}}
-#!/bin/sh
-#SBATCH --job-name=BlastX
-#SBATCH --nodes=1
-#SBATCH --ntasks=8
-#SBATCH --time=50:00:00
-#SBATCH --mem=20gb
-#SBATCH --output=BlastX_info_1.txt
-#SBATCH --error=BlastX_error_1.txt
- 
-module load dmtcp
-module load blast/2.4
-
-cd $WORK/<project_folder>
-cp -r /work/HCC/DATA/blastdb/nr/ /tmp/  
-cp input_reads.fasta /tmp/
-
-dmtcp_launch --new-coordinator --rm --interval 3600 blastx -query \
-/tmp/input_reads.fasta -db /tmp/nr/nr -out blastx_output.alignments \
--num_threads $SLURM_NTASKS_PER_NODE
-{{< /highlight >}}
-{{% /panel %}}
-
-In this example, DMTCP takes checkpoints every hour (`--interval 3600`),
-and the actual command we want to checkpoint is `blastx` with
-some general BLAST options defined with `-query`, `-db`, `-out`,
-`-num_threads`.
-
-If this job is killed for various reasons, it can be restarted using the
-following submit file:
-
-{{% panel theme="info" header="dmtcp_restart_blastx.submit" %}}
-{{< highlight batch >}}
-#!/bin/sh
-#SBATCH --job-name=BlastX
-#SBATCH --nodes=1
-#SBATCH --ntasks=8
-#SBATCH --time=50:00:00
-#SBATCH --mem=20gb
-#SBATCH --output=BlastX_info_2.txt
-#SBATCH --error=BlastX_error_2.txt
-
-module load dmtcp
-module load blast/2.4
-
-cd $WORK/<project_folder>
-cp -r /work/HCC/DATA/blastdb/nr/ /tmp/
-cp input_reads.fasta /tmp/
-
-# Start DMTCP
-dmtcp_coordinator --daemon --port 0 --port-file /tmp/port
-export DMTCP_COORD_HOST=`hostname`
-export DMTCP_COORD_PORT=$(</tmp/port)
-
-# Restart job 
-./dmtcp_restart_script.sh
-{{< /highlight >}}
-{{% /panel %}}
-
-{{% notice info %}}
-`dmtcp_restart` generates new
-`ckpt_*.dmtcp` and `dmtcp_restart_script*.sh` files. Therefore, if
-the restarted job is also killed due to unavailable/exceeded resources,
-you can resubmit the same job again without any changes in the submit
-file shown above (just don't forget to delete the old `ckpt_*.dmtcp`
-files if you are using these files instead of `dmtcp_restart_script.sh`)
-{{% /notice %}}
-  
-Even though DMTCP tries to support most mainstream and commonly used
-applications, there is no guarantee that every application can be
-checkpointed and restarted.
diff --git a/content/guides/running_applications/fortran_c_on_hcc.md b/content/guides/running_applications/fortran_c_on_hcc.md
deleted file mode 100644
index 1ed6a62ac0fa388134f3596cff1b4e340946d33a..0000000000000000000000000000000000000000
--- a/content/guides/running_applications/fortran_c_on_hcc.md
+++ /dev/null
@@ -1,219 +0,0 @@
-+++
-title = "Fortran/C on HCC"
-description = "How to compile and run Fortran/C program on HCC machines"
-weight = "50"
-+++
-
-This quick start demonstrates how to implement a Fortran/C program on
-HCC supercomputers. The sample codes and submit scripts can be
-downloaded from [serial_dir.zip](/attachments/serial_dir.zip).
-
-#### Login to an HCC Cluster
-
-Log in to an HCC cluster through PuTTY ([For Windows Users]({{< relref "/quickstarts/connecting/for_windows_users">}})) or Terminal ([For Mac/Linux
-Users]({{< relref "/quickstarts/connecting/for_maclinux_users">}})) and make a subdirectory called `serial_dir` under the `$WORK` directory. 
-
-{{< highlight bash >}}
-$ cd $WORK
-$ mkdir serial_dir
-{{< /highlight >}}
-
-In the subdirectory `serial_dir`, save all the relevant Fortran/C codes. Here we include two demo
-programs, `demo_f_serial.f90` and `demo_c_serial.c`, that compute the sum from 1 to 20. 
-
-{{%expand "demo_f_serial.f90" %}}
-{{< highlight fortran >}}
-Program demo_f_serial
-    implicit none
-    integer, parameter :: N = 20
-    real*8 w
-    integer i
-    common/sol/ x
-    real*8 x
-    real*8, dimension(N) :: y
-    do i = 1,N
-        w = i*1d0
-        call proc(w)
-        y(i) = x
-        write(6,*) 'i,x = ', i, y(i)
-    enddo
-    write(6,*) 'sum(y) =',sum(y)
-Stop
-End Program
-Subroutine proc(w)
-    real*8, intent(in) :: w
-    common/sol/ x
-    real*8 x
-    x = w
-Return
-End Subroutine
-{{< /highlight >}}
-{{% /expand %}}
-
-
-{{%expand "demo_c_serial.c" %}}
-{{< highlight c >}}
-//demo_c_serial
-#include <stdio.h>
-
-double proc(double w){
-        double x;
-        x = w;
-        return x;
-}
-
-int main(int argc, char* argv[]){
-    int N=20;
-    double w;
-    int i;
-    double x;
-    double y[N];
-    double sum;
-    for (i = 1; i <= N; i++){        
-        w = i*1e0;
-        x = proc(w);
-        y[i-1] = x;
-        printf("i,x= %d %lf\n", i, y[i-1]) ;
-    }
-    
-    sum = 0e0;
-    for (i = 1; i<= N; i++){
-        sum = sum + y[i-1]; 
-    }
-    
-    printf("sum(y)= %lf\n", sum);  
- 
-return 0; 
-}
-{{< /highlight >}}
-{{% /expand %}}
-
----
-
-#### Compiling the Code
-
-Compiling Fortran/C code into an executable is usually done behind
-the scenes in a Graphical User Interface (GUI) environment, such as
-Microsoft Visual Studio. On an HCC cluster, the compiling is done
-explicitly by first loading a compiler of choice and then executing the
-corresponding compiling command. Here we will use the GNU Compiler
-Collection, `gcc`, for demonstration. Other available compilers such as
-`intel` or `pgi` can be looked up using the command
-line `module avail`.  Before compiling the code, make sure there is no 
-dependency on any numerical library in the code. If invoking a numerical 
-library is necessary, contact an HCC specialist 
-({{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu)) to
-discuss implementation options.
-
-{{< highlight bash >}}
-$ module load compiler/gcc/8.2
-$ gfortran demo_f_serial.f90 -o demo_f_serial.x
-$ gcc demo_c_serial.c -o demo_c_serial.x
-{{< /highlight >}}
-
-The above commands load the `gcc` compiler and use the compiling
-commands `gfortran` or `gcc` to compile the codes into `.x` files
-(executables). 
-
-#### Creating a Submit Script
-
-Create a submit script to request one core (default) and 1-min run time
-on the supercomputer. The name of the executable goes on the last
-line.
-
-{{% panel header="`submit_f.serial`"%}}
-{{< highlight bash >}}
-#!/bin/sh
-#SBATCH --mem-per-cpu=1024
-#SBATCH --time=00:01:00
-#SBATCH --job-name=Fortran
-#SBATCH --error=Fortran.%J.err
-#SBATCH --output=Fortran.%J.out
-
-module load compiler/gcc/4.9  
-./demo_f_serial.x
-{{< /highlight >}}
-{{% /panel %}}
-
-{{% panel header="`submit_c.serial`"%}}
-{{< highlight bash >}}
-#!/bin/sh
-#SBATCH --mem-per-cpu=1024
-#SBATCH --time=00:01:00
-#SBATCH --job-name=C
-#SBATCH --error=C.%J.err
-#SBATCH --output=C.%J.out
-
-module load compiler/gcc/4.9
-./demo_c_serial.x
-{{< /highlight >}}
-{{% /panel %}}
-
-#### Submit the Job
-
-The job can be submitted through the command `sbatch`. The job status
-can be monitored by entering `squeue` with the `-u` option.
-
-{{< highlight bash >}}
-$ sbatch submit_f.serial
-$ sbatch submit_c.serial
-$ squeue -u <username>
-{{< /highlight >}}
-
-Replace `<username>` with your HCC username.
-
-#### Sample Output
-
-The sum from 1 to 20 is computed and printed to the `.out` file (see
-below). 
-{{%expand "Fortran.out" %}}
-{{< highlight batchfile>}}
- i,x =            1   1.0000000000000000     
- i,x =            2   2.0000000000000000     
- i,x =            3   3.0000000000000000     
- i,x =            4   4.0000000000000000     
- i,x =            5   5.0000000000000000     
- i,x =            6   6.0000000000000000     
- i,x =            7   7.0000000000000000     
- i,x =            8   8.0000000000000000     
- i,x =            9   9.0000000000000000     
- i,x =           10   10.000000000000000     
- i,x =           11   11.000000000000000     
- i,x =           12   12.000000000000000     
- i,x =           13   13.000000000000000     
- i,x =           14   14.000000000000000     
- i,x =           15   15.000000000000000     
- i,x =           16   16.000000000000000     
- i,x =           17   17.000000000000000     
- i,x =           18   18.000000000000000     
- i,x =           19   19.000000000000000     
- i,x =           20   20.000000000000000     
- sum(y) =   210.00000000000000
-{{< /highlight >}}
-{{% /expand %}}
-
-{{%expand "C.out" %}}
-{{< highlight batchfile>}}
-i,x= 1 1.000000
-i,x= 2 2.000000
-i,x= 3 3.000000
-i,x= 4 4.000000
-i,x= 5 5.000000
-i,x= 6 6.000000
-i,x= 7 7.000000
-i,x= 8 8.000000
-i,x= 9 9.000000
-i,x= 10 10.000000
-i,x= 11 11.000000
-i,x= 12 12.000000
-i,x= 13 13.000000
-i,x= 14 14.000000
-i,x= 15 15.000000
-i,x= 16 16.000000
-i,x= 17 17.000000
-i,x= 18 18.000000
-i,x= 19 19.000000
-i,x= 20 20.000000
-sum(y)= 210.000000
-{{< /highlight >}}
-{{% /expand %}}
diff --git a/content/guides/running_applications/linux_file_permissions.md b/content/guides/running_applications/linux_file_permissions.md
deleted file mode 100644
index 18e63a5996c8c6786a49dfd258e61fe593496635..0000000000000000000000000000000000000000
--- a/content/guides/running_applications/linux_file_permissions.md
+++ /dev/null
@@ -1,48 +0,0 @@
-+++
-title = "Linux File Permissions"
-description = "How to view and change file permissions with Linux commands"
-weight = 20
-+++
-
-- [Opening a Terminal Window](#opening-a-terminal-window)
-- [Listing File Permissions](#listing-file-permissions)
-- [Changing File Permissions](#changing-file-permissions)
-
-## Opening a Terminal Window
------------------------
-
-Use your local terminal to connect to a cluster, or open a new terminal window on [Crane](https://crane.unl.edu). 
-
-Click [here](https://hcc.unl.edu/docs/quickstarts/connecting/) if you need help connecting to a cluster 
-with a local terminal.
-	
-Click [here](https://hcc.unl.edu/docs/guides/running_applications/jupyter/) if you need 
-help opening a new terminal window within JupyterHub.
-
-## Listing File Permissions
-
-Type the command `ls -l` to list the files and directories with file permissions for your current location.
-
-{{< figure src="/images/LinuxList.png" >}}  
-
-The first character denotes whether an item is a file or a directory. If 'd' is shown, it's a directory, and if '-' is shown, it's a file.
-Following the first character you will see some
-combination of r, w, x, and -. The first rwx group shows the ‘read’, ‘write’, and ‘execute’ permissions for the creator
-of that file or directory. A ‘-’ instead means a particular permission has not been granted; for example, “rw-” means the
-‘execute’ permission has not been granted. The next three entries are the permissions for the ‘group’, and the last three are the
-permissions for everyone else.
-
-Following the file permissions are the name of the creator, the name of the group, the size of the file, the date it was created, and finally
-the name of the file.
-
-
-## Changing File Permissions
-
-To change file permissions, use the command `chmod [permissions] [filename]`, where the permissions are indicated by a three-digit code.
-Each digit in the code corresponds to one of the three groups described above in the permissions printout: one for the creator permissions,
-one for the group permissions, and one for everyone else. The digits are interpreted as follows: 4=read, 2=write, 1=execute, and any combination of these is given by summing their codes.
-Each chmod command will include 3 codes.
-For example, to give the creator of mars.txt rights to read, write and execute, the group rights to read and execute, and everyone else only the right to read,
-we would use the command `chmod 754 mars.txt`.
-
-{{< figure src="/images/LinuxChange.png" >}}   
diff --git a/content/guides/running_applications/mpi_jobs_on_hcc.md b/content/guides/running_applications/mpi_jobs_on_hcc.md
deleted file mode 100644
index b2aaedbd2549fea480f99ed280e5fb4cbb6a4772..0000000000000000000000000000000000000000
--- a/content/guides/running_applications/mpi_jobs_on_hcc.md
+++ /dev/null
@@ -1,322 +0,0 @@
-+++
-title = "MPI Jobs on HCC"
-description = "How to compile and run MPI programs on HCC machines"
-weight = "52"
-+++
-
-This quick start demonstrates how to implement a parallel (MPI)
-Fortran/C program on HCC supercomputers. The sample codes and submit
-scripts can be downloaded from [mpi_dir.zip](/attachments/mpi_dir.zip).
-
-#### Login to an HCC Cluster
-
-Log in to an HCC cluster through PuTTY ([For Windows Users]({{< relref "/quickstarts/connecting/for_windows_users">}})) or Terminal ([For Mac/Linux
-Users]({{< relref "/quickstarts/connecting/for_maclinux_users">}})) and make a subdirectory called `mpi_dir` under the `$WORK` directory.
-
-{{< highlight bash >}}
-$ cd $WORK
-$ mkdir mpi_dir
-{{< /highlight >}}
-
-In the subdirectory `mpi_dir`, save all the relevant codes. Here we
-include two demo programs, `demo_f_mpi.f90` and `demo_c_mpi.c`, that
-compute the sum from 1 to 20 through parallel processes. A
-straightforward parallelization scheme is used for demonstration
-purposes. First, the master core (i.e. `myid=0`) distributes an equal
-computation workload to a certain number of cores (as specified by
-`--ntasks` in the submit script). Then, each worker core computes a
-partial summation as output. Finally, the master core collects the
-outputs from all worker cores and performs an overall summation. For easy
-comparison with the serial code ([Fortran/C on HCC]({{< relref "fortran_c_on_hcc">}})), the
-added lines in the parallel code (MPI) are marked with "!=" or "//=".
-
-{{%expand "demo_f_mpi.f90" %}}
-{{< highlight fortran >}}
-Program demo_f_mpi
-!====== MPI =====
-    use mpi     
-!================
-    implicit none
-    integer, parameter :: N = 20
-    real*8 w
-    integer i
-    common/sol/ x
-    real*8 x
-    real*8, dimension(N) :: y 
-!============================== MPI =================================
-    integer ind
-    real*8, dimension(:), allocatable :: y_local                    
-    integer numnodes,myid,rc,ierr,start_local,end_local,N_local     
-    real*8 allsum                                                   
-!====================================================================
-    
-!============================== MPI =================================
-    call mpi_init( ierr )                                           
-    call mpi_comm_rank ( mpi_comm_world, myid, ierr )               
-    call mpi_comm_size ( mpi_comm_world, numnodes, ierr )           
-                                                                                                                                        !
-    N_local = N/numnodes                                            
-    allocate ( y_local(N_local) )                                   
-    start_local = N_local*myid + 1                                  
-    end_local =  N_local*myid + N_local                             
-!====================================================================
-    do i = start_local, end_local
-        w = i*1d0
-        call proc(w)
-        ind = i - N_local*myid
-        y_local(ind) = x
-!       y(i) = x
-!       write(6,*) 'i, y(i)', i, y(i)
-    enddo   
-!       write(6,*) 'sum(y) =',sum(y)
-!============================================== MPI =====================================================
-    call mpi_reduce( sum(y_local), allsum, 1, mpi_real8, mpi_sum, 0, mpi_comm_world, ierr )             
-    call mpi_gather ( y_local, N_local, mpi_real8, y, N_local, mpi_real8, 0, mpi_comm_world, ierr )     
-                                                                                                        
-    if (myid == 0) then                                                                                 
-        write(6,*) '-----------------------------------------'                                          
-        write(6,*) '*Final output from... myid=', myid                                                  
-        write(6,*) 'numnodes =', numnodes                                                               
-        write(6,*) 'mpi_sum =', allsum  
-        write(6,*) 'y=...'
-        do i = 1, N
-            write(6,*) y(i)
-        enddo                                                                                       
-        write(6,*) 'sum(y)=', sum(y)                                                                
-    endif                                                                                               
-                                                                                                        
-    deallocate( y_local )                                                                               
-    call mpi_finalize(rc)                                                                               
-!========================================================================================================
-    
-Stop
-End Program
-Subroutine proc(w)
-    real*8, intent(in) :: w
-    common/sol/ x
-    real*8 x
-    
-    x = w
-    
-Return
-End Subroutine
-{{< /highlight >}}
-{{% /expand %}}
-
-{{%expand "demo_c_mpi.c" %}}
-{{< highlight c >}}
-//demo_c_mpi
-#include <stdio.h>
-//======= MPI ========
-#include "mpi.h"    
-#include <stdlib.h>   
-//====================
-
-double proc(double w){
-        double x;       
-        x = w;  
-        return x;
-}
-
-int main(int argc, char* argv[]){
-    int N=20;
-    double w;
-    int i;
-    double x;
-    double y[N];
-    double sum;
-//=============================== MPI ============================
-    int ind;                                                    
-    double *y_local;                                            
-    int numnodes,myid,rc,ierr,start_local,end_local,N_local;    
-    double allsum;                                              
-//================================================================
-//=============================== MPI ============================
-    MPI_Init(&argc, &argv);
-    MPI_Comm_rank( MPI_COMM_WORLD, &myid );
-    MPI_Comm_size ( MPI_COMM_WORLD, &numnodes );
-    N_local = N/numnodes;
-    y_local=(double *) malloc(N_local*sizeof(double));
-    start_local = N_local*myid + 1;
-    end_local = N_local*myid + N_local;
-//================================================================
-    
-    for (i = start_local; i <= end_local; i++){        
-        w = i*1e0;
-        x = proc(w);
-        ind = i - N_local*myid;
-        y_local[ind-1] = x;
-//      y[i-1] = x;
-//      printf("i,x= %d %lf\n", i, y[i-1]) ;
-    }
-    sum = 0e0;
-    for (i = 1; i<= N_local; i++){
-        sum = sum + y_local[i-1];   
-    }
-//  printf("sum(y)= %lf\n", sum);    
-//====================================== MPI ===========================================
-    MPI_Reduce( &sum, &allsum, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD );
-    MPI_Gather( &y_local[0], N_local, MPI_DOUBLE, &y[0], N_local, MPI_DOUBLE, 0, MPI_COMM_WORLD );
-    
-    if (myid == 0){
-    printf("-----------------------------------\n");
-    printf("*Final output from... myid= %d\n", myid);
-    printf("numnodes = %d\n", numnodes);
-    printf("mpi_sum = %lf\n", allsum);
-    printf("y=...\n");
-    for (i = 1; i <= N; i++){
-        printf("%lf\n", y[i-1]);
-    }   
-    sum = 0e0;
-    for (i = 1; i<= N; i++){
-        sum = sum + y[i-1]; 
-    }
-    
-    printf("sum(y) = %lf\n", sum);
-    
-    }
-    
-    free( y_local );
-    MPI_Finalize ();
-//======================================================================================        
-
-return 0;
-}
-{{< /highlight >}}
-{{% /expand %}}
-
----
-
-#### Compiling the Code
-
-Compiling an MPI code requires first loading a compiler "engine"
-such as `gcc`, `intel`, or `pgi`, and then loading an MPI wrapper,
-`openmpi`. Here we will use the GNU Compiler Collection, `gcc`, for
-demonstration.
-
-{{< highlight bash >}}
-$ module load compiler/gcc/6.1 openmpi/2.1
-$ mpif90 demo_f_mpi.f90 -o demo_f_mpi.x  
-$ mpicc demo_c_mpi.c -o demo_c_mpi.x
-{{< /highlight >}}
-
-The above commands load the `gcc` compiler with the `openmpi` wrapper.
-The compiling commands `mpif90` and `mpicc` are used to compile the codes
-to `.x` files (executables).
-
-#### Creating a Submit Script
-
-Create a submit script to request 5 cores (with `--ntasks`). The parallel
-execution command `mpirun ./` needs to be entered on the last line, before the
-executable name.
-
-{{% panel header="`submit_f.mpi`"%}}
-{{< highlight bash >}}
-#!/bin/sh
-#SBATCH --ntasks=5
-#SBATCH --mem-per-cpu=1024
-#SBATCH --time=00:01:00
-#SBATCH --job-name=Fortran
-#SBATCH --error=Fortran.%J.err
-#SBATCH --output=Fortran.%J.out
-
-mpirun ./demo_f_mpi.x 
-{{< /highlight >}}
-{{% /panel %}}
-
-{{% panel header="`submit_c.mpi`"%}}
-{{< highlight bash >}}
-#!/bin/sh
-#SBATCH --ntasks=5
-#SBATCH --mem-per-cpu=1024
-#SBATCH --time=00:01:00
-#SBATCH --job-name=C
-#SBATCH --error=C.%J.err
-#SBATCH --output=C.%J.out
-
-mpirun ./demo_c_mpi.x 
-{{< /highlight >}}
-{{% /panel %}}
-
-#### Submit the Job
-
-The job can be submitted through the command `sbatch`. The job status
-can be monitored by entering `squeue` with the `-u` option.
-
-{{< highlight bash >}}
-$ sbatch submit_f.mpi
-$ sbatch submit_c.mpi
-$ squeue -u <username>
-{{< /highlight >}}
-
-Replace `<username>` with your HCC username.
-
-Sample Output
--------------
-
-The sum from 1 to 20 is computed and printed to the `.out` file (see
-below). The outputs from the 5 cores are collected and processed by the
-master core (i.e. `myid=0`).
-
-{{%expand "Fortran.out" %}}
-{{< highlight batchfile>}}
- -----------------------------------------
- *Final output from... myid=           0
- numnodes =           5
- mpi_sum =   210.00000000000000     
- y=...
-   1.0000000000000000     
-   2.0000000000000000     
-   3.0000000000000000     
-   4.0000000000000000     
-   5.0000000000000000     
-   6.0000000000000000     
-   7.0000000000000000     
-   8.0000000000000000     
-   9.0000000000000000     
-   10.000000000000000     
-   11.000000000000000     
-   12.000000000000000     
-   13.000000000000000     
-   14.000000000000000     
-   15.000000000000000     
-   16.000000000000000     
-   17.000000000000000     
-   18.000000000000000     
-   19.000000000000000     
-   20.000000000000000     
- sum(y)=   210.00000000000000     
-{{< /highlight >}}
-{{% /expand %}} 
-
-{{%expand "C.out" %}}
-{{< highlight batchfile>}}
------------------------------------
-*Final output from... myid= 0
-numnodes = 5
-mpi_sum = 210.000000
-y=...
-1.000000
-2.000000
-3.000000
-4.000000
-5.000000
-6.000000
-7.000000
-8.000000
-9.000000
-10.000000
-11.000000
-12.000000
-13.000000
-14.000000
-15.000000
-16.000000
-17.000000
-18.000000
-19.000000
-20.000000
-sum(y) = 210.000000
-{{< /highlight >}}
-{{% /expand %}}
-
diff --git a/content/guides/submitting_jobs/_index.md b/content/guides/submitting_jobs/_index.md
deleted file mode 100644
index 73e0272e1262abf0a7f246837d581e157f07feb8..0000000000000000000000000000000000000000
--- a/content/guides/submitting_jobs/_index.md
+++ /dev/null
@@ -1,198 +0,0 @@
-+++
-title = "Submitting Jobs"
-description =  "How to submit jobs to HCC resources"
-weight = "10"
-+++
-
-Crane and Rhino are managed by
-the [SLURM](https://slurm.schedmd.com) resource manager.  
-In order to run processing on Crane or Rhino, you
-must create a SLURM script that will run your processing. After
-submitting the job, SLURM will schedule your processing on an available
-worker node.
-
-Before writing a submit file, you may need to
-[compile your application]({{< relref "/guides/running_applications/compiling_source_code" >}}).
-
-- [Ensure proper working directory for job output](#ensure-proper-working-directory-for-job-output)
-- [Creating a SLURM Submit File](#creating-a-slurm-submit-file)
-- [Submitting the job](#submitting-the-job)
-- [Checking Job Status](#checking-job-status)
-  -   [Checking Job Start](#checking-job-start)
-- [Next Steps](#next-steps)
-
-
-### Ensure proper working directory for job output
-
-{{% notice info %}}
-All SLURM job output should be directed to your /work path.
-{{% /notice %}}
-
-{{% panel theme="info" header="Manual specification of /work path" %}}
-{{< highlight bash >}}
-$ cd /work/[groupname]/[username]
-{{< /highlight >}}
-{{% /panel %}}
-
-The environment variable `$WORK` can also be used.
-{{% panel theme="info" header="Using environment variable for /work path" %}}
-{{< highlight bash >}}
-$ cd $WORK
-$ pwd
-/work/[groupname]/[username]
-{{< /highlight >}}
-{{% /panel %}}
-
-Review how /work differs from /home [here]({{< relref "/guides/handling_data/_index.md" >}}).
-
-### Creating a SLURM Submit File
-
-{{% notice info %}}
-The below example is for a serial job. For submitting MPI jobs, please
-look at the [MPI Submission Guide]({{< relref "submitting_an_mpi_job" >}}).
-{{% /notice %}}
-
-A SLURM submit file is broken into 2 sections: the job description and
-the processing.  SLURM job description lines are prefixed with `#SBATCH` in
-the submit file.
-
-**SLURM Submit File**
-
-{{< highlight batch >}}
-#!/bin/sh
-#SBATCH --time=03:15:00          # Run time in hh:mm:ss
-#SBATCH --mem-per-cpu=1024       # Maximum memory required per CPU (in megabytes)
-#SBATCH --job-name=hello-world
-#SBATCH --error=/work/[groupname]/[username]/job.%J.err
-#SBATCH --output=/work/[groupname]/[username]/job.%J.out
-
-module load example/test
-
-hostname
-sleep 60
-{{< /highlight >}}
-
-- **time**  
-  Maximum walltime the job can run.  After this time has expired, the
-  job will be stopped.
-- **mem-per-cpu**  
-  Memory that is allocated per core for the job.  If you exceed this
-  memory limit, your job will be stopped.
-- **mem**  
-  Specify the real memory required per node in megabytes. If you
-  exceed this limit, your job will be stopped. Note that you
-  should ask for less memory than each node actually has. For
-  instance, Rhino has 1TB, 512GB, 256GB, and 192GB of RAM per node. You may
-  only request 1000GB of RAM for the 1TB node, 500GB of RAM for the
-  512GB nodes, 250GB of RAM for the 256GB nodes, and 187.5GB for the 192GB nodes.
-  For Crane, the max is 500GB. See the short example after this list.
-- **job-name**
-  The name of the job.  Will be reported in the job listing.
-- **partition**  
-  The partition the job should run in.  Partitions determine the job's
-  priority and what nodes the job can run on.  See the
-  [Partitions]({{< relref "partitions" >}}) page for a list of possible partitions.
-- **error**  
-  Location where the stderr will be written for the job.  `[groupname]`
-  and `[username]` should be replaced with your group name and username.
-  Your username can be retrieved with the command `id -un` and your
-  group with `id -ng`.
-- **output**  
-  Location where the stdout will be written for the job.
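-
-As a short sketch (hypothetical value), requesting whole-node memory with `--mem` instead of `--mem-per-cpu` looks like:
-
-{{< highlight batch >}}
-#SBATCH --mem=8192       # Request 8GB (8192 MB) of real memory per node
-{{< /highlight >}}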
-
-More advanced submit commands can be found on the [SLURM Docs](https://slurm.schedmd.com/sbatch.html).
-You can also find an example of an MPI submission on [Submitting an MPI Job]({{< relref "submitting_an_mpi_job" >}}).
-
-### Submitting the job
-
-Submitting the SLURM job is done with the command `sbatch`.  SLURM will read
-the submit file, and schedule the job according to the description in
-the submit file.
-
-Submitting the job described above is:
-
-{{% panel theme="info" header="SLURM Submission" %}}
-{{< highlight batch >}}
-$ sbatch example.slurm
-Submitted batch job 24603
-{{< /highlight >}}
-{{% /panel %}}
-
-The job was successfully submitted.
-
-### Checking Job Status
-
-Job status is found with the command `squeue`.  It will provide
-information such as:
-
-- The State of the job: 
-    - **R** - Running
-    - **PD** - Pending - Job is awaiting resource allocation.
-    - Additional codes are available
-      on the [squeue](http://slurm.schedmd.com/squeue.html)
-      page.
-- Job Name
-- Run Time
-- Nodes running the job
-
-Checking the status of the job is easiest by filtering by your username,
-using the `-u` option to squeue.
-
-{{< highlight batch >}}
-$ squeue -u <username>
-  JOBID PARTITION     NAME       USER  ST       TIME  NODES NODELIST(REASON)
-  24605     batch hello-wo <username>   R       0:56      1 b01
-{{< /highlight >}}
-
-Additionally, if you want to see the status of a specific partition, for
-example if you are part of a [partition]({{< relref "partitions" >}}),
-you can use the `-p` option to `squeue`:
-
-{{< highlight batch >}}
-$ squeue -p esquared
-  JOBID PARTITION     NAME     USER  ST       TIME  NODES NODELIST(REASON)
-  73435  esquared MyRandom tingting   R   10:35:20      1 ri19n10
-  73436  esquared MyRandom tingting   R   10:35:20      1 ri19n12
-  73735  esquared SW2_driv   hroehr   R   10:14:11      1 ri20n07
-  73736  esquared SW2_driv   hroehr   R   10:14:11      1 ri20n07
-{{< /highlight >}}
-
-#### Checking Job Start
-
-You may view the start time of your job with the
-command `squeue --start`.  The output of the command will show the
-expected start time of the jobs.
-
-{{< highlight batch >}}
-$ squeue --start --user lypeng
-  JOBID PARTITION     NAME     USER  ST           START_TIME  NODES NODELIST(REASON)
-   5822     batch  Starace   lypeng  PD  2013-06-08T00:05:09      3 (Priority)
-   5823     batch  Starace   lypeng  PD  2013-06-08T00:07:39      3 (Priority)
-   5824     batch  Starace   lypeng  PD  2013-06-08T00:09:09      3 (Priority)
-   5825     batch  Starace   lypeng  PD  2013-06-08T00:12:09      3 (Priority)
-   5826     batch  Starace   lypeng  PD  2013-06-08T00:12:39      3 (Priority)
-   5827     batch  Starace   lypeng  PD  2013-06-08T00:12:39      3 (Priority)
-   5828     batch  Starace   lypeng  PD  2013-06-08T00:12:39      3 (Priority)
-   5829     batch  Starace   lypeng  PD  2013-06-08T00:13:09      3 (Priority)
-   5830     batch  Starace   lypeng  PD  2013-06-08T00:13:09      3 (Priority)
-   5831     batch  Starace   lypeng  PD  2013-06-08T00:14:09      3 (Priority)
-   5832     batch  Starace   lypeng  PD                  N/A      3 (Priority)
-{{< /highlight >}}
-
-The output shows the expected start time of the jobs, as well as the
-reason that the jobs are currently idle (in this case, low priority of
-the user due to running numerous jobs already).
- 
-#### Removing the Job
-
-Removing the job is done with the `scancel` command.  The only argument
-to the `scancel` command is the job id.  For the job above, the command
-is:
-
-{{< highlight batch >}}
-$ scancel 24605
-{{< /highlight >}}
-
-### Next Steps
-
-{{% children  %}} 
diff --git a/content/guides/submitting_jobs/condor_jobs_on_hcc.md b/content/guides/submitting_jobs/condor_jobs_on_hcc.md
deleted file mode 100644
index 4a2ab4ea601d19fd3fc84b4861002914126f937d..0000000000000000000000000000000000000000
--- a/content/guides/submitting_jobs/condor_jobs_on_hcc.md
+++ /dev/null
@@ -1,219 +0,0 @@
-+++
-title = "Condor Jobs on HCC"
-description = "How to run jobs using Condor on HCC machines"
-weight = "54"
-+++
-
-This quick start demonstrates how to run multiple copies of Fortran/C program
-using Condor on HCC supercomputers. The sample codes and submit scripts
-can be downloaded from [condor_dir.zip](/attachments/3178558.zip).
-
-#### Login to an HCC Cluster
-
-Log in to an HCC cluster through PuTTY ([For Windows Users]({{< relref "/quickstarts/connecting/for_windows_users">}})) or Terminal ([For Mac/Linux Users]({{< relref "/quickstarts/connecting/for_maclinux_users">}})) and make a subdirectory called `condor_dir` under the `$WORK` directory.  In the subdirectory `condor_dir`, create job subdirectories that host the input data files. Here we create two job subdirectories, `job_0` and `job_1`, and put a data file (`data.dat`) in each subdirectory. The data file in `job_0` has a column of data listing the integers from 1 to 5. The data file in `job_1` has an integer list from 6 to 10.
-
-{{< highlight bash >}}
-$ cd $WORK
-$ mkdir condor_dir
-$ cd condor_dir
-$ mkdir job_0
-$ mkdir job_1
-{{< /highlight >}}
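-
-One way to create the two data files, as a sketch (assuming the standard `seq` utility; any editor works equally well):
-
-{{< highlight bash >}}
-$ seq 1 5 > job_0/data.dat
-$ seq 6 10 > job_1/data.dat
-{{< /highlight >}}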
-
-In the subdirectory `condor_dir`, save all the relevant codes. Here we
-include two demo programs, `demo_f_condor.f90` and `demo_c_condor.c`,
-that compute the sum of the data stored in each job subdirectory
-(`job_0` and `job_1`). The parallelization scheme here is as follows.
-First, the master computer node sends out many copies of the
-executable from the `condor_dir` subdirectory, along with a copy of the data
-file in each job subdirectory. The number of executable copies is
-specified in the submit script (`queue`), and it usually matches
-the number of job subdirectories. Next, the workload is distributed
-among a pool of worker computer nodes. At any given time, the number of
-available worker nodes may vary. Each worker node executes its jobs
-independently of the other worker nodes. The output files are stored
-separately in each job subdirectory. No additional coding is needed to
-turn the serial code "parallel"; parallelization here is achieved
-through the submit script.
-
-{{%expand "demo_condor.f90" %}}
-{{< highlight fortran >}}
-Program demo_f_condor
-    implicit none
-    integer, parameter :: N = 5
-    real*8 w
-    integer i
-    common/sol/ x
-    real*8 x
-    real*8, dimension(N) :: y_local
-    real*8, dimension(N) :: input_data
-    
-    open(10, file='data.dat')
-    
-    do i = 1,N
-        read(10,*) input_data(i)
-    enddo
-    
-    do i = 1,N
-        w = input_data(i)*1d0
-        call proc(w)
-        y_local(i) = x      
-        write(6,*) 'i,x = ', i, y_local(i)
-    enddo
-    write(6,*) 'sum(y) =',sum(y_local)
-Stop
-End Program
-Subroutine proc(w)
-    real*8, intent(in) :: w
-    common/sol/ x
-    real*8 x
-    
-    x = w
-    
-Return
-End Subroutine
-{{< /highlight >}}
-{{% /expand %}}
-
-
-{{%expand "demo_c_condor.c" %}}
-{{< highlight c >}}
-//demo_c_condor
-#include <stdio.h>
-
-double proc(double w){
-        double x;       
-        x = w;  
-        return x;
-}
-
-int main(int argc, char* argv[]){
-    int N=5;
-    double w;
-    int i;
-    double x;
-    double y_local[N];
-    double sum; 
-    double input_data[N];
-    FILE *fp;
-    fp = fopen("data.dat","r");
-    for (i = 1; i<= N; i++){
-    fscanf(fp, "%lf", &input_data[i-1]);
-    }
-    
-    for (i = 1; i <= N; i++){        
-        w = input_data[i-1]*1e0;
-        x = proc(w);
-        y_local[i-1] = x;
-        printf("i,x= %d %lf\n", i, y_local[i-1]) ;
-    }
-    
-    sum = 0e0;
-    for (i = 1; i<= N; i++){
-        sum = sum + y_local[i-1];   
-    }
-    
-    printf("sum(y)= %lf\n", sum);    
-return 0;
-}
-{{< /highlight >}}
-{{% /expand %}}
-
----
-
-#### Compiling the Code
-
-The compiled executable needs to match the "standard" environment of the
-worker node. The easiest way is to directly use the compilers installed
-on the HCC supercomputer without loading extra modules. The standard
-compiler of the HCC supercomputer is the GNU Compiler Collection. The version
-can be checked with the commands `gcc -v` or `gfortran -v`.
-
-
-{{< highlight bash >}}
-$ gfortran demo_f_condor.f90 -o demo_f_condor.x
-$ gcc demo_c_condor.c -o demo_c_condor.x
-{{< /highlight >}}
-
-#### Creating a Submit Script
-
-Create a submit script to request 2 jobs (`queue`). The name of the job
-subdirectories is specified in the line `initialdir`. The
-`$(process)` macro appends integer numbers to the job subdirectory
-name `job_`. The numbers run from `0` to `queue-1`. The name of the input
-data file is specified in the line `transfer_input_files`.
-
-{{% panel header="`submit_f.condor`"%}}
-{{< highlight bash >}}
-universe = grid
-grid_resource = pbs
-batch_queue = guest
-should_transfer_files = yes
-when_to_transfer_output = on_exit
-executable = demo_f_condor.x
-output = Fortran_$(process).out
-error = Fortran_$(process).err
-initialdir = job_$(process)
-transfer_input_files = data.dat
-queue 2
-{{< /highlight >}}
-{{% /panel %}}
-
-{{% panel header="`submit_c.condor`"%}}
-{{< highlight bash >}}
-universe = grid
-grid_resource = pbs
-batch_queue = guest
-should_transfer_files = yes
-when_to_transfer_output = on_exit
-executable = demo_c_condor.x
-output = C_$(process).out
-error = C_$(process).err
-initialdir = job_$(process)
-transfer_input_files = data.dat
-queue 2
-{{< /highlight >}}
-{{% /panel %}}
-
-#### Submit the Job
-
-The job can be submitted through the command `condor_submit`. The job
-status can be monitored by entering `condor_q` followed by the
-username. 
-
-{{< highlight bash >}}
-$ condor_submit submit_f.condor
-$ condor_submit submit_c.condor
-$ condor_q <username>
-{{< /highlight >}}
-
-Replace `<username>` with your HCC username.
-
-Sample Output
--------------
-
-In the job subdirectory `job_0`, the sum from 1 to 5 is computed and
-printed to the `.out` file. In the job subdirectory `job_1`, the sum
-from 6 to 10 is computed and printed to the `.out` file. 
-
-{{%expand "Fortran_0.out" %}}
-{{< highlight batchfile>}}
- i,x =            1   1.0000000000000000     
- i,x =            2   2.0000000000000000     
- i,x =            3   3.0000000000000000     
- i,x =            4   4.0000000000000000     
- i,x =            5   5.0000000000000000     
- sum(y) =   15.000000000000000     
-{{< /highlight >}}
-{{% /expand %}}
-
-{{%expand "Fortran_1.out" %}}
-{{< highlight batchfile>}}
- i,x =            1   6.0000000000000000     
- i,x =            2   7.0000000000000000     
- i,x =            3   8.0000000000000000     
- i,x =            4   9.0000000000000000     
- i,x =            5   10.000000000000000     
- sum(y) =   40.000000000000000     
-{{< /highlight >}}
-{{% /expand %}}
diff --git a/content/guides/submitting_jobs/submitting_an_openmp_job.md b/content/guides/submitting_jobs/submitting_an_openmp_job.md
deleted file mode 100644
index 7c04168c8d6bc215cdb771a76ab3acc2e23f5eba..0000000000000000000000000000000000000000
--- a/content/guides/submitting_jobs/submitting_an_openmp_job.md
+++ /dev/null
@@ -1,42 +0,0 @@
-+++
-title = "Submitting an OpenMP Job"
-description =  "How to submit an OpenMP job on HCC resources."
-+++
-
-Submitting an OpenMP job is different from
-[Submitting an MPI Job]({{< relref "submitting_an_mpi_job" >}})
-since you must request multiple cores from a single node.
-
-{{% panel theme="info" header="OpenMP example submission" %}}
-{{< highlight batch >}}
-#!/bin/sh
-#SBATCH --ntasks-per-node=16     # 16 cores
-#SBATCH --nodes=1                # 1 node
-#SBATCH --mem-per-cpu=1024       # Minimum memory required per CPU (in megabytes)
-#SBATCH --time=03:15:00          # Run time in hh:mm:ss
-#SBATCH --error=/work/[groupname]/[username]/job.%J.err
-#SBATCH --output=/work/[groupname]/[username]/job.%J.out
- 
-export OMP_NUM_THREADS=${SLURM_NTASKS_PER_NODE}
-./openmp-app.exe 
-{{< /highlight >}}
-{{% /panel %}}
-
-Notice that we used `ntasks-per-node` to specify the number of cores we
-want on a single node.  Additionally, we specify that we only want
-1 `node`.  
-
-`OMP_NUM_THREADS` is required to limit the number of cores that OpenMP
-will use on the node.  It is set to `${SLURM_NTASKS_PER_NODE}` to
-automatically match the `ntasks-per-node` value (in this example 16).
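-
-As a minimal sketch of what such an application might look like (a hypothetical `openmp-app.c`, not part of this guide):
-
-{{< highlight c >}}
-// openmp-app.c -- print a greeting from each OpenMP thread
-#include <omp.h>
-#include <stdio.h>
-
-int main(void) {
-    // The parallel region spawns OMP_NUM_THREADS threads
-    #pragma omp parallel
-    {
-        printf("Hello from thread %d of %d\n",
-               omp_get_thread_num(), omp_get_num_threads());
-    }
-    return 0;
-}
-{{< /highlight >}}
-
-Compiled with, e.g., `gcc -fopenmp openmp-app.c -o openmp-app.exe`, it prints one line per core requested with `ntasks-per-node`.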
-
-### Compiling
-
-Directions to compile OpenMP applications can be found on
-[Compiling an OpenMP Application]({{< relref "/guides/running_applications/compiling_source_code/compiling_an_openmp_application" >}}).
-
-### Further Documentation
-
-Further OpenMP documentation can be found on LLNL's
-[OpenMP](https://computing.llnl.gov/tutorials/openMP) website.
diff --git a/content/guides/handling_data/_index.md b/content/handling_data/_index.md
similarity index 67%
rename from content/guides/handling_data/_index.md
rename to content/handling_data/_index.md
index 75d0bff3c1bbde296a74b355aeb455c63a3c1217..1b648b4155e1ae350ace2c08ba9673fa4bd9932b 100644
--- a/content/guides/handling_data/_index.md
+++ b/content/handling_data/_index.md
@@ -7,8 +7,8 @@ weight = "30"
 {{% panel theme="danger" header="**Sensitive and Protected Data**" %}}HCC currently has *no storage* that is suitable for **HIPAA** or other **PID** data sets.  Users are not permitted to store such data on HCC machines.{{% /panel %}}
 
 All HCC machines have three separate areas for every user to store data,
-each intended for a different purpose.   In addition, we have a transfer
-service that utilizes [Globus Connect]({{< relref "globus_connect" >}}).
+each intended for a different purpose.   In addition, we have a transfer
+service that utilizes [Globus Connect]({{< relref "data_transfer/globus_connect/" >}}).
 {{< figure src="/images/35325560.png" height="500" class="img-border">}}
 
 ---
@@ -20,11 +20,11 @@ variable (i.e. '`cd $HOME'`).
 {{% /notice %}}
 
 Your home directory (i.e. `/home/[group]/[username]`) is meant for items
-that take up relatively small amounts of space.  For example:  source
-code, program binaries, configuration files, etc.  This space is
-quota-limited to **20GB per user**.  The home directories are backed up
-for the purposes of best-effort disaster recovery.  This space is not
-intended as an area for I/O to active jobs.  **/home** is mounted
+that take up relatively small amounts of space.  For example:  source
+code, program binaries, configuration files, etc.  This space is
+quota-limited to **20GB per user**.  The home directories are backed up
+for the purposes of best-effort disaster recovery.  This space is not
+intended as an area for I/O to active jobs.  **/home** is mounted
 **read-only** on cluster worker nodes to enforce this policy.
 
 ---
@@ -67,12 +67,12 @@ variable (i.e. '`cd $WORK'`).
 {{% panel theme="danger" header="**File Loss**" %}}The `/work` directories are **not backed up**. Irreparable data loss is possible with a mis-typed command. See [Preventing File Loss]({{< relref "preventing_file_loss" >}}) for strategies to avoid this.{{% /panel %}}
 
 Every user has a corresponding directory under /work using the same
-naming convention as `/home` (i.e. `/work/[group]/[username]`).  We
-encourage all users to use this space for I/O to running jobs.  This
+naming convention as `/home` (i.e. `/work/[group]/[username]`).  We
+encourage all users to use this space for I/O to running jobs.  This
 directory can also be used when larger amounts of space are temporarily
-needed.  There is a **50TB per group quota**; space in /work is shared
-among all users.  It should be treated as short-term scratch space, and
-**is not backed up**.  **Please use the `hcc-du` command to check your
+needed.  There is a **50TB per group quota**; space in /work is shared
+among all users.  It should be treated as short-term scratch space, and
+**is not backed up**.  **Please use the `hcc-du` command to check your
 own and your group's usage, and back up and clean up your files at
 reasonable intervals in $WORK.**
 
@@ -80,17 +80,17 @@ reasonable intervals in $WORK.**
 ### Purge Policy
 
 HCC has a **purge policy on /work** for files that become dormant.
- After **6 months of inactivity on a file (26 weeks)**, an automated
-purge process will reclaim the used space of these dormant files.  HCC
+ After **6 months of inactivity on a file (26 weeks)**, an automated
+purge process will reclaim the used space of these dormant files.  HCC
 provides the **`hcc-purge`** utility to list both the summary and the
 actual file paths of files that have been dormant for **24 weeks**.
- This list is periodically generated; the timestamp of the last search
+ This list is periodically generated; the timestamp of the last search
 is included in the default summary output when calling `hcc-purge` with
-no arguments.  No output from `hcc-purge` indicates the last scan did
-not find any dormant files.  `hcc-purge -l` will use the less pager to
-list the matching files for the user.  The candidate list can also be
+no arguments.  No output from `hcc-purge` indicates the last scan did
+not find any dormant files.  `hcc-purge -l` will use the less pager to
+list the matching files for the user.  The candidate list can also be
 accessed at the following path: `/lustre/purge/current/${USER}.list`.
- This list is updated twice a week, on Mondays and Thursdays.
+ This list is updated twice a week, on Mondays and Thursdays.
 
 {{% notice warning %}}
 `/work` is intended for recent job output and not long term storage. Evidence of circumventing the purge policy by users will result in consequences including account lockout.
@@ -98,33 +98,33 @@ accessed at the following path:` /lustre/purge/current/${USER}.list`.
 
 If you have space requirements outside what is currently provided,
 please
-email <a href="mailto:hcc-support@unl.edu" class="external-link">hcc-support@unl.edu</a> and
+email <a href="mailto:hcc-support@unl.edu" class="external-link">hcc-support@unl.edu</a> and
 we will gladly discuss alternatives.
 
 ---
 ### [Attic]({{< relref "using_attic" >}})
 
-Attic is a near line archive available for purchase at HCC.  Attic
-provides reliable large data storage that is designed to be more
-reliable then `/work`, and larger than `/home`. Access to Attic is done
-through [Globus Connect]({{< relref "globus_connect" >}}).
+Attic is a near-line archive available for purchase at HCC.  Attic
+provides reliable large data storage that is designed to be more
+reliable than `/work`, and larger than `/home`. Access to Attic is done
+through [Globus Connect]({{< relref "data_transfer/globus_connect/" >}}).
 
 More details on Attic can be found on HCC's
 <a href="https://hcc.unl.edu/attic" class="external-link">Attic</a>
 website.
 
 ---
-### [Globus Connect]({{< relref "globus_connect" >}})
+### [Globus Connect]({{< relref "data_transfer/globus_connect/" >}})
 
 For moving large amounts of data into or out of HCC resources, users are
-highly encouraged to consider using [Globus
-Connect]({{< relref "globus_connect" >}}).
+highly encouraged to consider using [Globus
+Connect]({{< relref "data_transfer/globus_connect/" >}}).
 
 ---
 ### Using Box
 
 You can use your [UNL
-Box.com]({{< relref "integrating_box_with_hcc" >}}) account to download and
+Box.com]({{< relref "integrating_box_with_hcc" >}}) account to download and
 upload files from any of the HCC clusters.
 
 
diff --git a/content/handling_data/data_storage/_index.md b/content/handling_data/data_storage/_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..33f75fddb02cc63d68ab4c867e9d4c778a83ffd0
--- /dev/null
+++ b/content/handling_data/data_storage/_index.md
@@ -0,0 +1,129 @@
++++
+title = "Data Storage"
+description = "How to work with and transfer data to/from HCC resources."
+weight = "30"
++++
+
+{{% panel theme="danger" header="**Sensitive and Protected Data**" %}}HCC currently has *no storage* that is suitable for **HIPAA** or other **PID** data sets.  Users are not permitted to store such data on HCC machines.{{% /panel %}}
+
+All HCC machines have three separate areas for every user to store data,
+each intended for a different purpose.   In addition, we have a transfer
+service that utilizes [Globus Connect]({{< relref "../data_transfer/globus_connect" >}}).
+{{< figure src="/images/35325560.png" >}}
+
+---
+### Home Directory
+
+{{% notice info %}}
+You can access your home directory quickly using the $HOME environmental
+variable (i.e. '`cd $HOME'`).
+{{% /notice %}}
+
+Your home directory (i.e. `/home/[group]/[username]`) is meant for items
+that take up relatively small amounts of space.  For example:  source
+code, program binaries, configuration files, etc.  This space is
+quota-limited to **20GB per user**.  The home directories are backed up
+for the purposes of best-effort disaster recovery.  This space is not
+intended as an area for I/O to active jobs.  **/home** is mounted
+**read-only** on cluster worker nodes to enforce this policy.
+
+---
+### Common Directory
+
+{{% notice info %}}
+You can access your common directory quickly using the $COMMON
+environmental variable (i.e. '`cd $COMMON`')
+{{% /notice %}}
+
+The common directory operates similarly to work and is mounted with
+**read and write capability to worker nodes on all HCC clusters**. This
+means that any files stored in common can be accessed from Crane and Tusker, making this directory ideal for items that need to be
+accessed from multiple clusters, such as reference databases and shared
+data files.
+
+{{% notice warning %}}
+Common is not designed for heavy I/O usage. Please continue to use your
+work directory for active job output to ensure the best performance of
+your jobs.
+{{% /notice %}}
+
+Quotas for common are **30 TB per group**, with larger quotas available
+for purchase if needed. However, files stored here will **not be backed
+up** and are **not subject to purge** at this time. Please continue to
+backup your files to prevent irreparable data loss.
+
+Additional information on using the common directories can be found in
+the documentation on [Using the /common File System]({{< relref "using_the_common_file_system" >}}).
+
+---
+### High Performance Work Directory
+
+{{% notice info %}}
+You can access your work directory quickly using the $WORK environmental
+variable (i.e. '`cd $WORK'`).
+{{% /notice %}}
+
+{{% panel theme="danger" header="**File Loss**" %}}The `/work` directories are **not backed up**. Irreparable data loss is possible with a mis-typed command. See [Preventing File Loss]({{< relref "preventing_file_loss" >}}) for strategies to avoid this.{{% /panel %}}
+
+Every user has a corresponding directory under /work using the same
+naming convention as `/home` (i.e. `/work/[group]/[username]`).  We
+encourage all users to use this space for I/O to running jobs.  This
+directory can also be used when larger amounts of space are temporarily
+needed.  There is a **50TB per group quota**; space in /work is shared
+among all users.  It should be treated as short-term scratch space, and
+**is not backed up**.  **Please use the `hcc-du` command to check your
+own and your group's usage, and back up and clean up your files at
+reasonable intervals in $WORK.**
+
+---
+### Purge Policy
+
+HCC has a **purge policy on /work** for files that become dormant.
+After **6 months of inactivity on a file (26 weeks)**, an automated
+purge process will reclaim the used space of these dormant files.  HCC
+provides the **`hcc-purge`** utility to list both the summary and the
+actual file paths of files that have been dormant for **24 weeks**.
+This list is periodically generated; the timestamp of the last search
+is included in the default summary output when calling `hcc-purge` with
+no arguments.  No output from `hcc-purge` indicates the last scan did
+not find any dormant files.  `hcc-purge -l` will use the less pager to
+list the matching files for the user.  The candidate list can also be
+accessed at the following path: `/lustre/purge/current/${USER}.list`.
+This list is updated twice a week, on Mondays and Thursdays.
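+
+For instance (using only the invocations described above), checking the summary and paging through the candidate list looks like:
+
+{{< highlight bash >}}
+$ hcc-purge        # summary of dormant files, with the timestamp of the last scan
+$ hcc-purge -l     # page through the matching file paths using less
+{{< /highlight >}}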
+
+{{% notice warning %}}
+`/work` is intended for recent job output and not long term storage. Evidence of circumventing the purge policy by users will result in consequences including account lockout.
+{{% /notice %}}
+
+If you have space requirements outside what is currently provided,
+please
+email <a href="mailto:hcc-support@unl.edu" class="external-link">hcc-support@unl.edu</a> and
+we will gladly discuss alternatives.
+
+---
+### [Attic]({{< relref "using_attic" >}})
+
+Attic is a near-line archive available for purchase at HCC.  Attic
+provides reliable large data storage that is designed to be more
+reliable than `/work`, and larger than `/home`. Access to Attic is done
+through [Globus Connect]({{< relref "../data_transfer/globus_connect" >}}).
+
+More details on Attic can be found on HCC's
+<a href="https://hcc.unl.edu/attic" class="external-link">Attic</a>
+website.
+
+---
+### [Globus Connect]({{< relref "../data_transfer/globus_connect" >}})
+
+For moving large amounts of data into or out of HCC resources, users are
+highly encouraged to consider using [Globus
+Connect]({{< relref "../data_transfer/globus_connect" >}}).
+
+---
+### Using Box
+
+You can use your [UNL
+Box.com]({{< relref "integrating_box_with_hcc" >}}) account to download and
+upload files from any of the HCC clusters.
+
+
diff --git a/content/guides/handling_data/data_for_unmc_users_only.md b/content/handling_data/data_storage/data_for_unmc_users_only.md
similarity index 99%
rename from content/guides/handling_data/data_for_unmc_users_only.md
rename to content/handling_data/data_storage/data_for_unmc_users_only.md
index 9fb44bc222fe869a6bc20d56344e8060c16aad7c..708c9140488620c85c0733c79d71ed11bd3e78fc 100644
--- a/content/guides/handling_data/data_for_unmc_users_only.md
+++ b/content/handling_data/data_storage/data_for_unmc_users_only.md
@@ -1,7 +1,7 @@
 +++ 
 title = "Data for UNMC Users Only"
 description= "Data storage options for UNMC users"
-weight = 50
+weight = 60
 +++
 
 {{% panel theme="danger" header="Sensitive and Protected Data" %}} HCC currently has no storage that is suitable for HIPAA or other PID
diff --git a/content/guides/handling_data/integrating_box_with_hcc.md b/content/handling_data/data_storage/integrating_box_with_hcc.md
similarity index 99%
rename from content/guides/handling_data/integrating_box_with_hcc.md
rename to content/handling_data/data_storage/integrating_box_with_hcc.md
index 4234c991fd347ef630102ee9d04689201bd0a0d0..1d4549eeed198c27adc2c4b7f432c53a0c1b67cc 100644
--- a/content/guides/handling_data/integrating_box_with_hcc.md
+++ b/content/handling_data/data_storage/integrating_box_with_hcc.md
@@ -1,7 +1,7 @@
 +++
 title = "Integrating Box with HCC"
 description = "How to integrate Box with HCC"
-weight = 30
+weight = 50
 +++
 
 UNL has come to an arrangement
diff --git a/content/handling_data/data_storage/linux_file_permissions.md b/content/handling_data/data_storage/linux_file_permissions.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/content/handling_data/data_storage/preventing_file_loss.md b/content/handling_data/data_storage/preventing_file_loss.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/content/handling_data/data_storage/using_attic.md b/content/handling_data/data_storage/using_attic.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/content/guides/handling_data/using_nus_gitlab_instance/_index.md b/content/handling_data/data_storage/using_nus_gitlab_instance/_index.md
similarity index 99%
rename from content/guides/handling_data/using_nus_gitlab_instance/_index.md
rename to content/handling_data/data_storage/using_nus_gitlab_instance/_index.md
index d850551d11ce6da7fa6c2ef0a4e20db5e35c3764..0a34ebffa48b21365bec2dca0e52771aa780a8f9 100644
--- a/content/guides/handling_data/using_nus_gitlab_instance/_index.md
+++ b/content/handling_data/data_storage/using_nus_gitlab_instance/_index.md
@@ -1,7 +1,7 @@
 +++
 title = "Using NU's Gitlab instance"
 description = "How to use the NU-hosted git.unl.edu"
-weight = "40"
+weight = "70"
 +++
 
 [Git](http://git-scm.com) is a free
diff --git a/content/guides/handling_data/using_nus_gitlab_instance/setting_up_gitlab_on_hcc_clusters.md b/content/handling_data/data_storage/using_nus_gitlab_instance/setting_up_gitlab_on_hcc_clusters.md
similarity index 100%
rename from content/guides/handling_data/using_nus_gitlab_instance/setting_up_gitlab_on_hcc_clusters.md
rename to content/handling_data/data_storage/using_nus_gitlab_instance/setting_up_gitlab_on_hcc_clusters.md
diff --git a/content/guides/handling_data/using_the_common_file_system.md b/content/handling_data/data_storage/using_the_common_file_system.md
similarity index 99%
rename from content/guides/handling_data/using_the_common_file_system.md
rename to content/handling_data/data_storage/using_the_common_file_system.md
index 5a5ecdb015fc0c6b041eb0e46a71097610fbc5e0..1addd06773cd0b0fedf5b0c51de799f323541f22 100644
--- a/content/guides/handling_data/using_the_common_file_system.md
+++ b/content/handling_data/data_storage/using_the_common_file_system.md
@@ -1,7 +1,7 @@
 +++
 title = "Using the /common File System"
 description = "How to use HCC's /common file system"
-weight = 70
+weight = 30
 +++
 
 ### Quick overview: 
diff --git a/content/handling_data/data_transfer/_index.md b/content/handling_data/data_transfer/_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..3717077b3d42092e548c6262cca02ebaf9bba969
--- /dev/null
+++ b/content/handling_data/data_transfer/_index.md
@@ -0,0 +1,20 @@
++++
+title = "Data Transfer"
+description = "How to transfer data to/from HCC resources."
+weight = "30"
++++
+
+### [Globus Connect]({{< relref "../data_transfer/globus_connect/" >}})
+
+For moving large amounts of data into or out of HCC resources, users are
+highly encouraged to consider using [Globus
+Connect]({{< relref "../data_transfer/globus_connect/" >}}).
+
+---
+### Using Box
+
+You can use your [UNL
+Box.com]({{< relref "../data_storage/integrating_box_with_hcc" >}}) account to download and
+upload files from any of the HCC clusters.
+
+
diff --git a/content/guides/handling_data/connect_to_cb3_irods.md b/content/handling_data/data_transfer/connect_to_cb3_irods.md
similarity index 99%
rename from content/guides/handling_data/connect_to_cb3_irods.md
rename to content/handling_data/data_transfer/connect_to_cb3_irods.md
index ed03f1ee1fc28fed6ec7cf043a1a1921e19e0584..c123dd62309960df2f6ffa1a3c4c59b9fb2258e3 100644
--- a/content/guides/handling_data/connect_to_cb3_irods.md
+++ b/content/handling_data/data_transfer/connect_to_cb3_irods.md
@@ -1,7 +1,7 @@
 +++ 
 title = "Connecting to CB3 iRODS"
 description= "How to connect to the CB3 iRODS instance."
-weight = 80
+weight = 40
 +++
 
 {{% panel theme="danger" header="Sensitive and Protected Data" %}} 
diff --git a/content/guides/handling_data/globus_connect/_index.md b/content/handling_data/data_transfer/globus_connect/_index.md
similarity index 99%
rename from content/guides/handling_data/globus_connect/_index.md
rename to content/handling_data/data_transfer/globus_connect/_index.md
index ae0585e118fce8e4c6ddac35651eedad39f4b52a..61830c406dd9477414c72525536790a8c01ead65 100644
--- a/content/guides/handling_data/globus_connect/_index.md
+++ b/content/handling_data/data_transfer/globus_connect/_index.md
@@ -1,6 +1,7 @@
 +++
 title = "Globus Connect"
 description = "Globus Connect overview"
+weight = 20
 +++
 
 <a href="https://www.globus.org/globus-connect" class="external-link">Globus Connect</a> is
diff --git a/content/handling_data/data_transfer/globus_connect/activating_hcc_cluster_endpoints.md b/content/handling_data/data_transfer/globus_connect/activating_hcc_cluster_endpoints.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/content/guides/handling_data/globus_connect/creating_globus_groups.md b/content/handling_data/data_transfer/globus_connect/creating_globus_groups.md
similarity index 100%
rename from content/guides/handling_data/globus_connect/creating_globus_groups.md
rename to content/handling_data/data_transfer/globus_connect/creating_globus_groups.md
diff --git a/content/guides/handling_data/globus_connect/file_sharing.md b/content/handling_data/data_transfer/globus_connect/file_sharing.md
similarity index 100%
rename from content/guides/handling_data/globus_connect/file_sharing.md
rename to content/handling_data/data_transfer/globus_connect/file_sharing.md
diff --git a/content/handling_data/data_transfer/globus_connect/file_transfers_between_endpoints.md b/content/handling_data/data_transfer/globus_connect/file_transfers_between_endpoints.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/content/guides/handling_data/globus_connect/file_transfers_to_and_from_personal_workstations.md b/content/handling_data/data_transfer/globus_connect/file_transfers_to_and_from_personal_workstations.md
similarity index 100%
rename from content/guides/handling_data/globus_connect/file_transfers_to_and_from_personal_workstations.md
rename to content/handling_data/data_transfer/globus_connect/file_transfers_to_and_from_personal_workstations.md
diff --git a/content/guides/handling_data/globus_connect/globus_command_line_interface.md b/content/handling_data/data_transfer/globus_connect/globus_command_line_interface.md
similarity index 100%
rename from content/guides/handling_data/globus_connect/globus_command_line_interface.md
rename to content/handling_data/data_transfer/globus_connect/globus_command_line_interface.md
diff --git a/content/handling_data/data_transfer/high_speed_data_transfers.md b/content/handling_data/data_transfer/high_speed_data_transfers.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/content/guides/handling_data/using_rclone_with_hcc.md b/content/handling_data/data_transfer/using_rclone_with_hcc.md
old mode 100755
new mode 100644
similarity index 99%
rename from content/guides/handling_data/using_rclone_with_hcc.md
rename to content/handling_data/data_transfer/using_rclone_with_hcc.md
index 2069d6b8dece6cc6051bc66869935888bd080489..02fc1c2617c7fba88dcd5772a5ec622bc5fab1ee
--- a/content/guides/handling_data/using_rclone_with_hcc.md
+++ b/content/handling_data/data_transfer/using_rclone_with_hcc.md
@@ -1,7 +1,7 @@
 +++
 title = "Using Rclone for File Transfer"
 description = "How to use Rclone with HCC"
-weight = 9
+weight =30
 +++
 
 Rclone is an open source file transfer tool to make transfering files to and from various cloud resources such as Box, Amazon S3, Microsoft OneDrive, and Google Cloud Storage and your local machine a simpler task. Guides on how to set up a variety of resources to transfer to and from can be found at [rclone's webpage](https://rclone.org/). 
diff --git a/content/intro/_index.md b/content/intro/_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..bab84a3973a2f2d65dc91824add3d6e20fbb11d9
--- /dev/null
+++ b/content/intro/_index.md
@@ -0,0 +1,42 @@
++++
+title = "Introduction to HPC"
+description = "What is a cluster and what is HPC"
+weight = "10"
++++
+
+## What is HPC
+High-Performance Computing (HPC) is the use of groups of computers to solve 
+computations a user or group would not be able to solve in a reasonable time-frame 
+on their own desktop or laptop. This is often achieved by splitting one large job 
+amongst numerous cores or 'workers'. This is similar to how a skyscraper is built 
+by numerous individuals rather than a single person. Many fields take advantage of 
+HPC including bioinformatics, chemistry, materials engineering, and newer fields 
+such as educational psychology and philosophy.
+
+{{< figure src="/images/cluster.png" height="450" >}}
+
+HPC clusters consist of four primary parts: the login node, management node, workers, 
+and a central storage array. All of these parts are bound together with a scheduler 
+such as HTCondor or SLURM. 
+
+#### Login Node:
+Users will automatically land on the login node when they log in to the clusters. 
+You will [submit jobs]({{< ref "/submitting_jobs" >}}) using one of the schedulers 
+and pull the results of your jobs. Any jobs run directly on the login node will be 
+stopped so others can use the login node to submit jobs.
+
+#### Management Node: 
+The management node does as it sounds: it manages the cluster and provides a central 
+point from which to manage the rest of the systems.
+
+#### Worker Nodes:
+The worker nodes are what run and process the jobs submitted through the schedulers. 
+The schedulers make efficient use of the cluster by packing in as many jobs as the 
+requested resources allow across the nodes. They also provide fair-use computing by 
+making sure one user or group is not using the entire cluster at once, allowing 
+others to use the clusters.
+
+#### Central Storage Array:
+The central storage array allows all of the nodes within the cluster to have access to 
+the same files without needing to transfer them around. HCC has three arrays mounted on 
+the clusters with more details [here]({{< ref "/handling_data" >}}).
diff --git a/content/quickstarts/_index.md b/content/quickstarts/_index.md
deleted file mode 100644
index d2f1201d7a2cd269d3625cc8576b1251852e5f91..0000000000000000000000000000000000000000
--- a/content/quickstarts/_index.md
+++ /dev/null
@@ -1,10 +0,0 @@
-+++
-title = "Quickstarts"
-weight = "10"
-+++
-
-The quick start guides require that you already have an HCC account.  You
-can get an HCC account by applying on the
-[HCC website](http://hcc.unl.edu/newusers/).
-
-{{% children %}}
diff --git a/content/quickstarts/connecting/_index.md b/content/quickstarts/connecting/_index.md
deleted file mode 100644
index b0149d21ca0095c145057e8f14414d104e7132af..0000000000000000000000000000000000000000
--- a/content/quickstarts/connecting/_index.md
+++ /dev/null
@@ -1,20 +0,0 @@
-+++
-title = "How to Connect"
-description = "What is a cluster and what is HPC"
-weight = "9"
-+++
-High-Performance Computing is the use of groups of computers to solve computations a user or group would not be able to solve in a reasonable time-frame on their own desktop or laptop. This is often achieved by splitting one large job amongst numerous cores or 'workers'. This is similar to how a skyscraper is built by numerous individuals rather than a single person. Many fields take advantage of HPC including bioinformatics, chemistry, materials engineering, and newer fields such as educational psychology and philosophy.
-{{< figure src="/images/cluster.png" height="450" >}}
-HPC clusters consist of four primary parts, the login node, management node, workers, and a central storage array. All of these parts are bound together with a scheduler such as HTCondor or SLURM. 
-</br></br>
-#### Login Node:
-Users will automatically land on the login node when they log in to the clusters. You will [submit jobs]({{< ref "/guides/submitting_jobs" >}}) using one of the schedulers and pull the results of your jobs. Any jobs run directly on the login node will be stopped so others can use the login node to submit jobs.
-</br></br>
-#### Management Node: 
-The management node does as it sounds: it manages the cluster and provides a central point to manage the rest of the systems.
-</br></br>
-#### Worker Nodes:
-The worker nodes are what run and process your jobs that are submitted from the schedulers. Through the use of the schedulers, more work can be efficiently done by squeezing in all jobs possible for the resources requested throughout the nodes. They also allow for fair use computing by making sure one user or group is not using the entire cluster at once and allowing others to use the clusters.
-</br></br> 
-#### Central Storage Array:
-The central storage array allows all of the nodes within the cluster to have access to the same files without needing to transfer them around. HCC has three arrays mounted on the clusters with more details [here]({{< ref "/guides/handling_data" >}}).
diff --git a/content/quickstarts/submitting_jobs.md b/content/submitting_jobs/_index.md
similarity index 79%
rename from content/quickstarts/submitting_jobs.md
rename to content/submitting_jobs/_index.md
index e2ab2fa20ca39a9b7f670d173af745324ad555ba..08a194161b5ed62c3af83e47642e527e062e7cef 100644
--- a/content/quickstarts/submitting_jobs.md
+++ b/content/submitting_jobs/_index.md
@@ -1,18 +1,18 @@
 +++
 title = "Submitting Jobs"
 description =  "How to submit jobs to HCC resources"
-weight = "10"
+weight = "50"
 +++
 
 Crane and Rhino are managed by
-the [SLURM](https://slurm.schedmd.com) resource manager.  
+the [SLURM](https://slurm.schedmd.com) resource manager.  
 In order to run processing on Crane, you
 must create a SLURM script that will run your processing. After
 submitting the job, SLURM will schedule your processing on an available
 worker node.
 
 Before writing a submit file, you may need to
-[compile your application]({{< relref "/guides/running_applications/compiling_source_code" >}}).
+[compile your application]({{< relref "/applications/user_software" >}}).
 
 - [Ensure proper working directory for job output](#ensure-proper-working-directory-for-job-output)
 - [Creating a SLURM Submit File](#creating-a-slurm-submit-file)
@@ -53,7 +53,7 @@ look at the [MPI Submission Guide.]({{< relref "submitting_an_mpi_job" >}})
 {{% /notice %}}
 
 A SLURM submit file is broken into 2 sections, the job description and
-the processing.  SLURM job description are prepended with `#SBATCH` in
+the processing.  SLURM job description lines are prefixed with `#SBATCH` in
 the submit file.
 
 **SLURM Submit File**
@@ -73,10 +73,10 @@ sleep 60
 {{< /highlight >}}
 
 - **time**  
-  Maximum walltime the job can run.  After this time has expired, the
+  Maximum walltime the job can run.  After this time has expired, the
   job will be stopped.
 - **mem-per-cpu**  
-  Memory that is allocated per core for the job.  If you exceed this
+  Memory that is allocated per core for the job.  If you exceed this
   memory limit, your job will be stopped.
 - **mem**  
   Specify the real memory required per node in MegaBytes. If you
@@ -84,25 +84,25 @@ sleep 60
   should ask for less memory than each node actually has. For Crane, the
   max is 500GB.
 - **job-name**
-  The name of the job.  Will be reported in the job listing.
+  The name of the job.  Will be reported in the job listing.
 - **partition**  
-  The partition the job should run in.  Partitions determine the job's
-  priority and on what nodes the partition can run on.  See the
+  The partition the job should run in.  Partitions determine the job's
+  priority and which nodes the job can run on.  See the
   [Partitions]({{< relref "/guides/submitting_jobs/partitions/_index.md" >}}) page for a list of possible partitions.
 - **error**  
-  Location of the stderr will be written for the job.  `[groupname]`
-  and `[username]` should be replaced your group name and username.
-  Your username can be retrieved with the command `id -un` and your
-  group with `id -ng`.
+  Location where the stderr for the job will be written.  `[groupname]`
+  and `[username]` should be replaced with your group name and username.
+  Your username can be retrieved with the command `id -un` and your
+  group with `id -ng` (see the example below).
 - **output**  
   Location of the stdout will be written for the job.
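+
+For example, the following shows how to look up the values to substitute
+(the username and group shown here are hypothetical placeholders):
+
+{{< highlight batch >}}
+$ id -un
+demo01
+$ id -ng
+demo_group
+{{< /highlight >}}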
 
 More advanced submit commands can be found on the [SLURM Docs](https://slurm.schedmd.com/sbatch.html).
-You can also find an example of a MPI submission on [Submitting an MPI Job]({{< relref "submitting_an_mpi_job" >}}).
+You can also find an example of an MPI submission on the [Submitting an MPI Job]({{< relref "submitting_an_mpi_job" >}}) page.
 
 ### Submitting the job
 
-Submitting the SLURM job is done by command `sbatch`.  SLURM will read
+Submitting the SLURM job is done with the `sbatch` command.  SLURM will read
 the submit file, and schedule the job according to the description in
 the submit file.
 
@@ -115,14 +115,14 @@ Submitted batch job 24603
 {{< /highlight >}}
 {{% /panel %}}
 
-The job was successfully submitted.
+The job was successfully submitted.
 
 ### Checking Job Status
 
-Job status is found with the command `squeue`.  It will provide
+Job status is checked with the `squeue` command.  It will provide
 information such as:
 
-- The State of the job: 
+- The State of the job: 
     - **R** - Running
     - **PD** - Pending - Job is awaiting resource allocation.
     - Additional codes are available
@@ -133,7 +133,7 @@ information such as:
 - Nodes running the job
 
 Checking the status of the job is easiest by filtering by your username,
-using the `-u` option to squeue.
+using the `-u` option to `squeue`.
 
 {{< highlight batch >}}
 $ squeue -u <username>
@@ -143,7 +143,7 @@ $ squeue -u <username>
 
 Additionally, if you want to see the status of a specific partition, for
 example if you are part of a [partition]({{< relref "/guides/submitting_jobs/partitions/_index.md" >}}),
-you can use the `-p` option to `squeue`:
+you can use the `-p` option to `squeue`:
 
 {{< highlight batch >}}
 $ squeue -p esquared
@@ -157,7 +157,7 @@ $ squeue -p esquared
 #### Checking Job Start
 
 You may view the start time of your job with the
-command `squeue --start`.  The output of the command will show the
+command `squeue --start`.  The output of the command will show the
 expected start time of the jobs.
 
 {{< highlight batch >}}
@@ -179,11 +179,11 @@ $ squeue --start --user lypeng
 The output shows the expected start time of the jobs, as well as the
 reason that the jobs are currently idle (in this case, low priority of
 the user due to running numerous jobs already).
- 
+ 
 #### Removing the Job
 
-Removing the job is done with the `scancel` command.  The only argument
-to the `scancel` command is the job id.  For the job above, the command
+Removing the job is done with the `scancel` command.  The only argument
+to the `scancel` command is the job ID.  For the job above, the command
 is:
 
 {{< highlight batch >}}
@@ -192,4 +192,4 @@ $ scancel 24605
 
 ### Next Steps
 
-{{% children  %}} 
+{{% children  %}} 
diff --git a/content/submitting_jobs/app_specific/_index.md b/content/submitting_jobs/app_specific/_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..f720ecdfa71afa61e5f571d07af2dd67bcc8d2bb
--- /dev/null
+++ b/content/submitting_jobs/app_specific/_index.md
@@ -0,0 +1,9 @@
++++
+title = "Application Specific Guides"
+weight = "100"
++++
+
+In-depth guides for running applications on HCC resources
+--------------------------------------
+
+{{% children description="true" %}}
diff --git a/content/submitting_jobs/app_specific/submitting_an_openmp_job.md b/content/submitting_jobs/app_specific/submitting_an_openmp_job.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/content/guides/submitting_jobs/submitting_ansys_jobs.md b/content/submitting_jobs/app_specific/submitting_ansys_jobs.md
similarity index 94%
rename from content/guides/submitting_jobs/submitting_ansys_jobs.md
rename to content/submitting_jobs/app_specific/submitting_ansys_jobs.md
index cf5953b1606d2464937fd3e139b863e00050d94e..26d0936195a6689d7b105050dcbcf67476c83487 100644
--- a/content/guides/submitting_jobs/submitting_ansys_jobs.md
+++ b/content/submitting_jobs/app_specific/submitting_ansys_jobs.md
@@ -37,5 +37,5 @@ Details of SLURM job submission can be found at [SUBMITTING JOBS]({{< relref "su
 ### Running ANSYS interactively
 
 1. To use graphical user interface, users need to first setup X11 forwarding. [HOW TO SETUP X11 FORWARDING]({{< relref "how_to_setup_x11_forwarding" >}})
-1. Start an interactie job using srun. NOTE: users need to add \--licenses=ansys_research or \--licenses=ansys_teaching to the srun command. [SUBMITTING AN INTERACTIVE JOB]({{< relref "submitting_an_interactive_job" >}})
+1. Start an interactive job using srun (see the example below). NOTE: users need to add \--licenses=ansys_research or \--licenses=ansys_teaching to the srun command. [SUBMITTING AN INTERACTIVE JOB]({{< relref "creating_an_interactive_job" >}})
 1. After the interactive job starts, execute "module load ansys/19.2", then run the ANSYS command, e.g. fluent, from command line. The GUI will show up if steps 1-2 are configured correctly. 
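+
+A minimal sketch of such an interactive request (the node, task, and memory
+values are illustrative placeholders; adjust them for your job) might be:
+
+{{< highlight batch >}}
+srun --licenses=ansys_research --nodes=1 --ntasks-per-node=4 --mem=8g --pty $SHELL
+{{< /highlight >}}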
diff --git a/content/guides/submitting_jobs/submitting_cuda_or_openacc_jobs.md b/content/submitting_jobs/app_specific/submitting_cuda_or_openacc_jobs.md
similarity index 97%
rename from content/guides/submitting_jobs/submitting_cuda_or_openacc_jobs.md
rename to content/submitting_jobs/app_specific/submitting_cuda_or_openacc_jobs.md
index 4b34235e4dca0cd0cd3a200b68033e6fc77c1d38..1e1b17645bd19e013660c8efd2b135294b8d5650 100644
--- a/content/guides/submitting_jobs/submitting_cuda_or_openacc_jobs.md
+++ b/content/submitting_jobs/app_specific/submitting_cuda_or_openacc_jobs.md
@@ -1,5 +1,5 @@
 +++
-title = "Submitting GPU jobs"
+title = "Submitting GPU Jobs"
 description =  "How to submit GPU (CUDA/OpenACC) jobs on HCC resources."
 +++
 
@@ -77,7 +77,7 @@ You must verify the GPU type and memory combination is valid based on the [avail
 ### Compiling
 
 Compilation of CUDA or OpenACC jobs must be performed on the GPU nodes.
-Therefore, you must run an [interactive job]({{< relref "submitting_an_interactive_job" >}})
+Therefore, you must run an [interactive job]({{< relref "creating_an_interactive_job" >}})
 to compile. An example command to compile in the `gpu` partition could be:
 
 {{< highlight batch >}}
diff --git a/content/guides/submitting_jobs/submitting_matlab_jobs.md b/content/submitting_jobs/app_specific/submitting_matlab_jobs.md
similarity index 100%
rename from content/guides/submitting_jobs/submitting_matlab_jobs.md
rename to content/submitting_jobs/app_specific/submitting_matlab_jobs.md
diff --git a/content/guides/submitting_jobs/submitting_r_jobs.md b/content/submitting_jobs/app_specific/submitting_r_jobs.md
similarity index 100%
rename from content/guides/submitting_jobs/submitting_r_jobs.md
rename to content/submitting_jobs/app_specific/submitting_r_jobs.md
diff --git a/content/guides/submitting_jobs/submitting_an_interactive_job.md b/content/submitting_jobs/creating_an_interactive_job.md
similarity index 96%
rename from content/guides/submitting_jobs/submitting_an_interactive_job.md
rename to content/submitting_jobs/creating_an_interactive_job.md
index ba5eed2c9127813c6c11c519fc69413c1da22a5e..1100a049883b84672e113bfef47f3dbe5846e841 100644
--- a/content/guides/submitting_jobs/submitting_an_interactive_job.md
+++ b/content/submitting_jobs/creating_an_interactive_job.md
@@ -1,6 +1,7 @@
 +++
-title = "Submitting an Interactive Job"
+title = "Creating an Interactive Job"
 description =  "How to run an interactive job on HCC resources."
+weight=20
 +++
 
 {{% notice info %}}
diff --git a/content/guides/submitting_jobs/hcc_acknowledgment_credit.md b/content/submitting_jobs/hcc_acknowledgment_credit.md
similarity index 99%
rename from content/guides/submitting_jobs/hcc_acknowledgment_credit.md
rename to content/submitting_jobs/hcc_acknowledgment_credit.md
index d04c68342486a900504f7988a4cf4f015039ae17..0ad8a0291ac59b29ae3a6aa3b72331ee1319ab15 100644
--- a/content/guides/submitting_jobs/hcc_acknowledgment_credit.md
+++ b/content/submitting_jobs/hcc_acknowledgment_credit.md
@@ -1,6 +1,7 @@
 +++
 title = "HCC Acknowledgment Credit"
 description =  "Details on the Acknowledgment Credit system."
+weight=90
 +++
 
 {{% notice note %}}
diff --git a/content/guides/submitting_jobs/job_dependencies.md b/content/submitting_jobs/job_dependencies.md
similarity index 99%
rename from content/guides/submitting_jobs/job_dependencies.md
rename to content/submitting_jobs/job_dependencies.md
index 98c96f2d3f00c797cc6cff15c339134cab1c5559..36aa971e848093c509eed7076212f6448a338b33 100644
--- a/content/guides/submitting_jobs/job_dependencies.md
+++ b/content/submitting_jobs/job_dependencies.md
@@ -1,6 +1,7 @@
 +++
 title = "Job Dependencies"
 description =  "How to use job dependencies with the SLURM scheduler."
+weight=55
 +++
 
 The job dependency feature of SLURM is useful when you need to run
diff --git a/content/guides/submitting_jobs/monitoring_jobs.md b/content/submitting_jobs/monitoring_jobs.md
similarity index 99%
rename from content/guides/submitting_jobs/monitoring_jobs.md
rename to content/submitting_jobs/monitoring_jobs.md
index 2e5d2e581cf69530d46926adcd6b66da617bb048..b6b458eb6ced01143f60d2755f583e74f3a6eec1 100644
--- a/content/guides/submitting_jobs/monitoring_jobs.md
+++ b/content/submitting_jobs/monitoring_jobs.md
@@ -1,6 +1,7 @@
 +++
 title = "Monitoring Jobs"
 description =  "How to find out information about running and completed jobs."
+weight=55
 +++
 
 Careful examination of running times, memory usage and output files will
diff --git a/content/guides/submitting_jobs/partitions/_index.md b/content/submitting_jobs/partitions/_index.md
similarity index 99%
rename from content/guides/submitting_jobs/partitions/_index.md
rename to content/submitting_jobs/partitions/_index.md
index 751ebb357701143dcf1fddc2e00352a69e144de4..88466f8e78b589cddeeb0d27bc3db882dcfdb0a4 100644
--- a/content/guides/submitting_jobs/partitions/_index.md
+++ b/content/submitting_jobs/partitions/_index.md
@@ -1,8 +1,9 @@
 +++
-title = "Partitions"
+title = "Available Partitions"
 description =  "Listing of partitions on Crane and Rhino."
 scripts = ["https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/js/jquery.tablesorter.min.js", "https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/js/widgets/widget-pager.min.js","https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/js/widgets/widget-filter.min.js","/js/sort-table.js"]
 css = ["http://mottie.github.io/tablesorter/css/theme.default.css","https://mottie.github.io/tablesorter/css/theme.dropbox.css", "https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/css/jquery.tablesorter.pager.min.css","https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/css/filter.formatter.min.css"]
+weight=70
 +++
 
 Partitions are used on Crane and Rhino to distinguish different
diff --git a/content/guides/submitting_jobs/partitions/crane_available_partitions.md b/content/submitting_jobs/partitions/crane_available_partitions.md
similarity index 100%
rename from content/guides/submitting_jobs/partitions/crane_available_partitions.md
rename to content/submitting_jobs/partitions/crane_available_partitions.md
diff --git a/content/guides/submitting_jobs/partitions/rhino_available_partitions.md b/content/submitting_jobs/partitions/rhino_available_partitions.md
similarity index 100%
rename from content/guides/submitting_jobs/partitions/rhino_available_partitions.md
rename to content/submitting_jobs/partitions/rhino_available_partitions.md
diff --git a/content/guides/sandstone/_index.md b/content/submitting_jobs/sandstone.md
similarity index 96%
rename from content/guides/sandstone/_index.md
rename to content/submitting_jobs/sandstone.md
index 8106deffb71bdc37ce182d6acc5b089b2e068571..079d5693180f15a14ae8c86c1ed4499dfb7e3613 100644
--- a/content/guides/sandstone/_index.md
+++ b/content/submitting_jobs/sandstone.md
@@ -1,51 +1,51 @@
-+++
-title = "Sandstone"
-description =  "How to use HCC's sandstone environment"
-weight = "45"
-+++
-
-
-### Overview
-
-The HCC Sandstone environment is a GUI interface to the Crane cluster featuring a file browser, text editor, web terminal and SLURM script helper,
-
-To login to the Sandstone environment, go to [crane.unl.edu](https://crane.unl.edu) in your web browser and sign in using your HCC Login Info and DUO authentication.
-
-Upon login, you will land at the File Browser.
-
-
-### File Browser
-The file browser allows you to view, access, and transfer files on Crane. On the left side you will have your available spaces, both your home and work directories. In the upper right of the page, you have buttons to upload files, create a file, and create a directory. 
-
-{{< figure src="/images/SandstonefileBrowserOver.png">}}
-
-Clicking on either box under "My Spaces" will change your current directory to either your home or work directory and display your user/group usage and quotas. You can then navigate directories by clicking through them in a similar manner as you would with Windows or MacOS. 
-
-{{< figure src="/images/SandstonefileOptions.png">}}
-
-Clicking on a file or directory will bring up some options such as the permissions and actions to do such as editing the file, duplicating or moving it, deleting it, and downloading it.  
-
-### Editor
-The editor is a basic text editor that allows you to have multiple files loaded and manipulate the files. A small file explorer is available on the left side to access more files. There are similar actions available for files above the mini file browser. 
-
-{{< figure src="/images/Sandstoneeditor.png">}}
-
-Like most text editors, basic functions exist to undo and redo changes, find and replace, and most importantly, to save the file. 
-
-{{< figure src="/images/SandstoneedtiorDropDown.png">}}
-
-### Terminal
-
-The terminal gives you access to the linux command line on crane, similar to what you would have if you SSH'd directly into Crane. Once the login and quote screen, you can enter commands and interact as you would with a standard terminal.
-{{< figure src="/images/SandstoneTerminal.png">}}
-
-### Slurm Assist
-
-Slurm assist is a tool to help create and run slurm submit scripts. The first step is to select a base profile from the profile dropdown menu. Options will appear and the directives will automatically appear. The options are editable to better fit to your specific job with more details found in our submitting jobs documentation. After the directives are filled out, you can then add the commands to start your job in the script section. To save the job, select 'save script for later' and save the script in a known location for later.
-{{< figure src="/images/SandstoneSASettings.png">}}
-From here, you can also schedule the script recently create, by selecting "Schedule Job". A confirmation will appear with the Job ID and then an instruction on how to view the status of your job.
-{{< figure src="/images/SandstoneJobConf.png">}}
-{{< figure src="/images/SandstoneSAStatus.png">}}
-You can view the progress of other jobs from slurm assist by going to the status page. Here you will see the State of the job, its ID, name, group name, runtime, and the start and end times. 
-{{< figure src="/images/SandstoneSAStatusPage.png">}}
-{{< figure src="/images/SandstoneSAStatuses.png">}}
\ No newline at end of file
++++
+title = "Sandstone"
+description =  "How to use HCC's Sandstone environment"
+weight = "95"
++++
+
+
+### Overview
+
+The HCC Sandstone environment is a GUI interface to the Crane cluster featuring a file browser, a text editor, a web terminal, and a SLURM script helper.
+
+To log in to the Sandstone environment, go to [crane.unl.edu](https://crane.unl.edu) in your web browser and sign in using your HCC credentials and DUO authentication.
+
+Upon login, you will land at the File Browser.
+
+
+### File Browser
+The file browser allows you to view, access, and transfer files on Crane. On the left side are your available spaces: your home and work directories. In the upper right of the page, there are buttons to upload files, create a file, and create a directory.
+
+{{< figure src="/images/SandstonefileBrowserOver.png">}}
+
+Clicking on either box under "My Spaces" will change your current directory to your home or work directory and display your user/group usage and quotas. You can then navigate directories by clicking through them, much as you would on Windows or macOS.
+
+{{< figure src="/images/SandstonefileOptions.png">}}
+
+Clicking on a file or directory will bring up options such as its permissions, along with actions such as editing, duplicating, moving, deleting, or downloading it.
+
+### Editor
+The editor is a basic text editor that allows you to open and edit multiple files at once. A small file explorer is available on the left side to access more files, with similar file actions available above it.
+
+{{< figure src="/images/Sandstoneeditor.png">}}
+
+Like most text editors, it provides basic functions to undo and redo changes, find and replace, and, most importantly, save the file.
+
+{{< figure src="/images/SandstoneedtiorDropDown.png">}}
+
+### Terminal
+
+The terminal gives you access to the Linux command line on Crane, similar to what you would have if you SSH'd directly into Crane. Once past the login and quota screen, you can enter commands and interact as you would with a standard terminal.
+{{< figure src="/images/SandstoneTerminal.png">}}
+
+### Slurm Assist
+
+Slurm Assist is a tool to help create and run SLURM submit scripts. The first step is to select a base profile from the profile dropdown menu; the corresponding options and directives will then appear automatically. The options are editable to better fit your specific job, with more details found in our submitting jobs documentation. After the directives are filled out, add the commands that start your job in the script section (a minimal sketch of such a script appears at the end of this section). To save the script, select 'save script for later' and save it in a location you can find again.
+{{< figure src="/images/SandstoneSASettings.png">}}
+From here, you can also schedule the script you just created by selecting "Schedule Job". A confirmation will appear with the Job ID, along with instructions on how to view the status of your job.
+{{< figure src="/images/SandstoneJobConf.png">}}
+{{< figure src="/images/SandstoneSAStatus.png">}}
+You can view the progress of your jobs from Slurm Assist by going to the status page. Here you will see the state of each job, its ID, name, group name, runtime, and start and end times.
+{{< figure src="/images/SandstoneSAStatusPage.png">}}
+{{< figure src="/images/SandstoneSAStatuses.png">}}
diff --git a/content/guides/submitting_jobs/submitting_a_job_array.md b/content/submitting_jobs/submitting_a_job_array.md
similarity index 99%
rename from content/guides/submitting_jobs/submitting_a_job_array.md
rename to content/submitting_jobs/submitting_a_job_array.md
index 08245c8d7764c9d00d1abbeaf59ddcf3b93d47b3..5c6acfee7b0201daeef507004230cedd48d1784f 100644
--- a/content/guides/submitting_jobs/submitting_a_job_array.md
+++ b/content/submitting_jobs/submitting_a_job_array.md
@@ -1,6 +1,7 @@
 +++
 title = "Submitting a Job Array"
 description =  "How to use job arrays with the SLURM scheduler."
+weight=30
 +++
 
 A job array is a set of jobs that share the same submit file, but will
diff --git a/content/guides/submitting_jobs/submitting_an_mpi_job.md b/content/submitting_jobs/submitting_an_mpi_job.md
similarity index 99%
rename from content/guides/submitting_jobs/submitting_an_mpi_job.md
rename to content/submitting_jobs/submitting_an_mpi_job.md
index 89315e421b87fc772867f7beb02e3f085a60f0e8..918a5253db706710c4e23851574e85c06f9229f9 100644
--- a/content/guides/submitting_jobs/submitting_an_mpi_job.md
+++ b/content/submitting_jobs/submitting_an_mpi_job.md
@@ -1,6 +1,7 @@
 +++
 title = "Submitting an MPI Job"
 description =  "How to submit an MPI job on HCC resources."
+weight=40
 +++
 
 This script requests 16 cores on nodes with InfiniBand:
diff --git a/content/guides/submitting_jobs/submitting_htcondor_jobs.md b/content/submitting_jobs/submitting_htcondor_jobs.md
similarity index 99%
rename from content/guides/submitting_jobs/submitting_htcondor_jobs.md
rename to content/submitting_jobs/submitting_htcondor_jobs.md
index a917caf294139139c2b6ec8759e8595c3c21f0c9..a2a49e095207e1a73e2119706dae2b6068e6714f 100644
--- a/content/guides/submitting_jobs/submitting_htcondor_jobs.md
+++ b/content/submitting_jobs/submitting_htcondor_jobs.md
@@ -1,6 +1,7 @@
 +++
 title = "Submitting HTCondor Jobs"
 description =  "How to submit HTCondor Jobs on HCC resources."
+weight=50
 +++
 
 If you require features of HTCondor, such as DAGMan or Pegasus,