From 097d2b2c6a828508ab3fe1c238d18a684b9ea983 Mon Sep 17 00:00:00 2001
From: Caughlin Bohn <cbohn4@unl.edu>
Date: Wed, 18 May 2022 16:27:48 -0500
Subject: [PATCH] Removed Rhino and Added Swan

---
 content/_index.md                                    | 12 ++++++------
 content/accounts/how_to_change_your_password.md      |  2 +-
 .../bioinformatics_tools/biodata_module.md           |  4 ++--
 ...e_for_rhino.md => available_software_for_swan.md} |  6 +++---
 content/connecting/mobaxterm.md                      |  2 +-
 content/connecting/putty.md                          |  4 ++--
 content/connecting/terminal.md                       |  4 ++--
 content/good_hcc_practices/_index.md                 |  2 +-
 content/handling_data/data_storage/_index.md         |  2 +-
 content/handling_data/data_storage/using_attic.md    |  2 +-
 .../data_storage/using_the_common_file_system.md     |  2 +-
 .../data_transfer/connect_to_cb3_irods.md            |  4 ++--
 content/handling_data/data_transfer/cyberduck.md     |  2 +-
 .../data_transfer/globus_connect/_index.md           |  4 ++--
 .../activating_hcc_cluster_endpoints.md              |  4 ++--
 .../data_transfer/globus_connect/file_sharing.md     |  2 +-
 .../file_transfers_between_endpoints.md              |  2 +-
 ...le_transfers_to_and_from_personal_workstations.md |  2 +-
 .../globus_connect/globus_command_line_interface.md  |  4 ++--
 .../data_transfer/high_speed_data_transfers.md       |  4 ++--
 content/handling_data/data_transfer/winscp.md        |  4 ++--
 content/open_ondemand/connecting_to_hcc_ondemand.md  |  2 +-
 content/submitting_jobs/_index.md                    |  2 +-
 content/submitting_jobs/partitions/_index.md         | 12 ++++++------
 ...le_partitions.md => swan_available_partitions.md} |  8 ++++----
 25 files changed, 49 insertions(+), 49 deletions(-)
 rename content/applications/modules/{available_software_for_rhino.md => available_software_for_swan.md} (91%)
 rename content/submitting_jobs/partitions/{rhino_available_partitions.md => swan_available_partitions.md} (77%)

diff --git a/content/_index.md b/content/_index.md
index 96928042..45707e10 100644
--- a/content/_index.md
+++ b/content/_index.md
@@ -36,14 +36,14 @@ are new to using HCC resources, Crane is the recommended cluster to use
 initially.  Limitations: Crane has only 2 CPU/16 cores and 64GB RAM per
 node. CraneOPA has 2 CPU/36 cores with a maximum of 512GB RAM per node.
 
-**Rhino**: Rhino is intended for large memory (RAM) computing needs.
-Rhino has 4 AMD Interlagos CPUs (64 cores) per node, with either 192GB or 256GB RAM per
-node in the default partition. For extremely large RAM needs, there is also
-a 'highmem' partition with 2 x 512GB and 2 x 1TB nodes.
+**Swan**: Swan is intended for large memory (RAM) computing needs.
+Swan has 2 Intel Xeon Gold 6348 CPUs (56 cores) per node, with 256GB RAM per
+node in the default partition. For extremely large RAM needs, there are also
+2 nodes with 2TB RAM each.
 
 **Important Notes**
 
--   The Crane and Rhino clusters are separate. But, they are
+-   The Crane and Swan clusters are separate, but they are
     similar enough that submission scripts on whichever one will work on
     another, and vice versa (excluding GPU resources and some combinations of
     RAM/core requests).
@@ -58,9 +58,9 @@ $ cd $WORK
 Resources
 ---------
 
-- ##### Crane - HCC's newest machine, Crane has 7232 Intel Xeon cores in 452 nodes with 64GB RAM per node.
+- ##### Crane - Crane has 7232 Intel Xeon cores in 452 nodes with 64GB RAM per node.
 
-- ##### Rhino - HCC's AMD-based cluster, intended for large RAM computing needs.
+- ##### Swan - HCC's newest Intel-based cluster, intended for large RAM computing needs.
 
 - ##### Red - This cluster is the resource for UNL's [USCMS](https://uscms.org/) Tier-2 site.
 
@@ -74,7 +74,7 @@ Resource Capabilities
 | Cluster | Overview | Processors | RAM\* | Connection | Storage
 | ------- | ---------| ---------- | --- | ---------- | ------
 | **Crane**   | 572 node LINUX cluster | 452 Intel Xeon E5-2670 2.60GHz 2 CPU/16 cores per node<br> <br>120 Intel Xeon E5-2697 v4 2.3GHz, 2 CPU/36 cores per node<br><br>("CraneOPA") | 452 nodes @ 62.5GB<br><br>79 nodes @ 250GB<br><br>37 nodes @ 500GB<br><br>4 nodes @ 1500GB | QDR Infiniband<br><br>EDR Omni-Path Architecture | ~1.8 TB local scratch per node<br><br>~4 TB local scratch per node<br><br>~1452 TB shared Lustre storage
-| **Rhino** | 110 node LINUX cluster | 110 AMD Interlagos CPUs (6272 / 6376), 4 CPU/64 cores per node | 106 nodes @ 187.5GB/250GB <br><br> 2 nodes @ 500GB<br><br> 2 nodes @ 994GB | QDR Infiniband | ~1.5TB local scratch per node <br><br> ~360TB shared BeeGFS storage |
+| **Swan** | 168 node LINUX cluster | 168 Intel Xeon Gold 6348 CPU, 2 CPU/56 cores per node | 168 nodes @ 256GB <br><br> 2 nodes @ 2000GB | HDR100 Infiniband | 3.5TB local scratch per node <br><br> ~5200TB shared Lustre storage |
 | **Red** | 344 node LINUX cluster | Various Xeon and  Opteron processors 7,280 cores maximum, actual number of job slots depends on RAM usage | 1.5-4GB RAM per job slot | 1Gb, 10Gb, and 40Gb Ethernet | ~10.8PB of raw storage space |
 | **Anvil** | 76 Compute nodes (Partially used for cloud, the rest used for general computing), 12 Storage nodes, 2 Network nodes Openstack cloud | 76 Intel Xeon E5-2650 v3 2.30GHz 2 CPU/20 cores per node | 76 nodes @ 256GB | 10Gb Ethernet | 528 TB Ceph shared storage (349TB available now) |
 
diff --git a/content/accounts/how_to_change_your_password.md b/content/accounts/how_to_change_your_password.md
index 49032b7b..7bfabb16 100644
--- a/content/accounts/how_to_change_your_password.md
+++ b/content/accounts/how_to_change_your_password.md
@@ -20,7 +20,7 @@ the following instructions to work.**
 - [Tutorial Video](#tutorial-video)
 
 Every HCC user has a password that is same on all HCC machines
-(Crane, Rhino, Anvil). This password needs to satisfy the HCC
+(Crane, Swan, Anvil). This password needs to satisfy the HCC
 password requirements.
 
 ### HCC password requirements
diff --git a/content/applications/app_specific/bioinformatics_tools/biodata_module.md b/content/applications/app_specific/bioinformatics_tools/biodata_module.md
index 97750365..2e2f4eb0 100644
--- a/content/applications/app_specific/bioinformatics_tools/biodata_module.md
+++ b/content/applications/app_specific/bioinformatics_tools/biodata_module.md
@@ -7,7 +7,7 @@ weight = "52"
 +++
 
 
-HCC hosts multiple databases (BLAST, KEGG, PANTHER, InterProScan), genome files, short read aligned indices etc. on Crane and Rhino.  
+HCC hosts multiple databases (BLAST, KEGG, PANTHER, InterProScan), genome files, short read aligner indices, etc. on Crane and Swan.  
 In order to use these resources, the "**biodata**" module needs to be loaded first.  
 For how to load module, please check [Module Commands]({{< relref "/applications/modules/_index.md" >}}).
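+
+A minimal sketch of loading the module and spot-checking the variables it exports (the `grep` filter below is only illustrative):
+
+{{< highlight bash >}}
+module load biodata
+env | grep -i blast   # illustrative: look for the exported database paths
+{{< /highlight >}}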
 
@@ -89,4 +89,4 @@ cp /scratch/blast_nucleotide.results .
 
 The organisms and their appropriate environmental variables for all genomes and chromosome files, as well as indices are shown in the table below.
 
-{{< table url="http://rhino-head.unl.edu:8192/bio/data/json" >}}
+{{< table url="http://swan-head.unl.edu:8192/bio/data/json" >}}
diff --git a/content/applications/modules/available_software_for_rhino.md b/content/applications/modules/available_software_for_swan.md
similarity index 91%
rename from content/applications/modules/available_software_for_rhino.md
rename to content/applications/modules/available_software_for_swan.md
index 88ce66a8..7b2e01c5 100644
--- a/content/applications/modules/available_software_for_rhino.md
+++ b/content/applications/modules/available_software_for_swan.md
@@ -1,6 +1,6 @@
 +++
-title = "Available Software for Rhino"
-description = "List of available software for rhino.unl.edu."
+title = "Available Software for Swan"
+description = "List of available software for swan.unl.edu."
 scripts = ["https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/js/jquery.tablesorter.min.js", "https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/js/widgets/widget-pager.min.js","https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/js/widgets/widget-filter.min.js","/js/sort-table.js"]
 css = ["http://mottie.github.io/tablesorter/css/theme.default.css","https://mottie.github.io/tablesorter/css/theme.dropbox.css", "https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/css/jquery.tablesorter.pager.min.css","https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/css/filter.formatter.min.css"]
 +++
@@ -42,4 +42,4 @@ If you are using custom GPU Anaconda Environment, the only module you need to lo
 `module load anaconda`
 {{% /panel %}}
 
-{{< table url="http://rhino-head.unl.edu:8192/lmod/spider/json" >}}
+{{< table url="http://swan-head.unl.edu:8192/lmod/spider/json" >}}
diff --git a/content/connecting/mobaxterm.md b/content/connecting/mobaxterm.md
index 38a417ae..a5425507 100644
--- a/content/connecting/mobaxterm.md
+++ b/content/connecting/mobaxterm.md
@@ -21,7 +21,7 @@ should be replaced by your HCC account username. If you do not have a
 HCC account, please contact a HCC specialist
 ({{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu))
 or go to https://hcc.unl.edu/newusers.
-To use the **Rhino** cluster, replace crane.unl.edu with with rhino.unl.edu.
+To use the **Swan** cluster, replace crane.unl.edu with swan.unl.edu.
 {{< figure src="/images/moba/session.png" height="450" >}}
 
 Select OK. You will be asked to enter your password and to authenticate with duo. 
diff --git a/content/connecting/putty.md b/content/connecting/putty.md
index 37db567b..95900569 100644
--- a/content/connecting/putty.md
+++ b/content/connecting/putty.md
@@ -41,8 +41,8 @@ Once you have PuTTY installed, run the application and follow these steps:
 
 {{% notice info %}}
 **Note that the example below uses the `Crane` cluster.
-Replace all instances of `crane` with `rhino` if
-you want to connect to the `Rhino` cluster.
+Replace all instances of `crane` with `swan` if
+you want to connect to the `Swan` cluster.
 {{% /notice %}}
 
 1.  On the first screen, type `crane.unl.edu` for Host Name, then click
diff --git a/content/connecting/terminal.md b/content/connecting/terminal.md
index e66fb603..ab5603a9 100644
--- a/content/connecting/terminal.md
+++ b/content/connecting/terminal.md
@@ -51,8 +51,8 @@ For example, to connect to the Crane cluster type the following in your terminal
 $ ssh <username>@crane.unl.edu
 {{< /highlight >}}
 
-where `<username>` is replaced with your HCC account name. To use the **Rhino** cluster, 
-replace crane.unl.edu with rhino.unl.edu.
+where `<username>` is replaced with your HCC account name. To use the **Swan** cluster, 
+replace crane.unl.edu with swan.unl.edu.
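+
+For example, to connect to Swan instead:
+
+{{< highlight bash >}}
+$ ssh <username>@swan.unl.edu
+{{< /highlight >}}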
 
 The first time you connect to one of our clusters from a computer, you will be prompted to verify the connection:
 
diff --git a/content/good_hcc_practices/_index.md b/content/good_hcc_practices/_index.md
index c7b07cad..ecc888fa 100644
--- a/content/good_hcc_practices/_index.md
+++ b/content/good_hcc_practices/_index.md
@@ -4,7 +4,7 @@ description = "Guidelines for good HCC practices"
 weight = "95"
 +++
 
-Crane and Rhino, our two high-performance clusters, are shared among all our users. 
+Crane and Swan, our two high-performance clusters, are shared among all our users. 
 Sometimes, some users' activities may negatively impact the clusters and the users. 
 To avoid this, we provide the following guidelines for good HCC practices.
 
diff --git a/content/handling_data/data_storage/_index.md b/content/handling_data/data_storage/_index.md
index d14df5f1..68003652 100644
--- a/content/handling_data/data_storage/_index.md
+++ b/content/handling_data/data_storage/_index.md
@@ -36,7 +36,7 @@ environmental variable (i.e. '`cd $COMMON`')
 
 The common directory operates similarly to work and is mounted with
 **read and write capability to worker nodes all HCC Clusters**. This
-means that any files stored in common can be accessed from Crane and Rhino, making this directory ideal for items that need to be
+means that any files stored in common can be accessed from Crane and Swan, making this directory ideal for items that need to be
 accessed from multiple clusters such as reference databases and shared
 data files.
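+
+For example, a reference data set staged into $COMMON from $WORK on one
+cluster is then readable from the other (the directory name is illustrative):
+
+{{< highlight bash >}}
+$ cp -r $WORK/reference_db $COMMON/
+{{< /highlight >}}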
 
diff --git a/content/handling_data/data_storage/using_attic.md b/content/handling_data/data_storage/using_attic.md
index cdc6c1f4..71e1a087 100644
--- a/content/handling_data/data_storage/using_attic.md
+++ b/content/handling_data/data_storage/using_attic.md
@@ -33,7 +33,7 @@ cost, please see the
 
 The easiest and fastest way to access Attic is via Globus. You can
 transfer files between your computer, our clusters ($HOME, $WORK, and $COMMON on
-Crane or Rhino), and Attic. Here is a detailed tutorial on
+Crane or Swan), and Attic. Here is a detailed tutorial on
 how to set up and use [Globus Connect]({{< relref "/handling_data/data_transfer/globus_connect" >}}). For
 Attic, use the Globus Endpoint **hcc\#attic**.  Your Attic files are
 located at `~, `which is a shortcut
diff --git a/content/handling_data/data_storage/using_the_common_file_system.md b/content/handling_data/data_storage/using_the_common_file_system.md
index 11a5d3e7..8f0206d4 100644
--- a/content/handling_data/data_storage/using_the_common_file_system.md
+++ b/content/handling_data/data_storage/using_the_common_file_system.md
@@ -7,7 +7,7 @@ weight = 30
 ### Quick overview: 
 
 -   Connected read/write to all HCC HPC cluster resources – you will see
-    the same files "in common" on any HCC cluster (i.e. Crane and Rhino).
+    the same files "in common" on any HCC cluster (i.e. Crane and Swan).
 -   30 TB Per-group quota at no charge – larger quota available for
     $105/TB/year
 -   No backups are made!  Don't be silly!  Precious data should still be
diff --git a/content/handling_data/data_transfer/connect_to_cb3_irods.md b/content/handling_data/data_transfer/connect_to_cb3_irods.md
index 0b7eac03..c8a61e04 100644
--- a/content/handling_data/data_transfer/connect_to_cb3_irods.md
+++ b/content/handling_data/data_transfer/connect_to_cb3_irods.md
@@ -45,9 +45,9 @@ data transfers should use CyberDuck instead.
 6.  After logging in, a new explorer window will appear and you will be in your personal directory.  You can transfer files or directories by dragging and dropping them to or from your local machine into the window.
 {{< figure src="/images/30442927.png" class="img-border" height="450" >}}
 
-### Using the iRODS CLI tools from Crane/Rhino
+### Using the iRODS CLI tools from Crane/Swan
 
-The iRODS icommand tools are available on Crane and Rhino to use for data transfer to/from the clusters.
+The iRODS icommand tools are available on Crane and Swan for data transfer to/from the clusters.
 They first require creating a small json configuration file.  Create a directory named `~/.irods` first by running
 
 {{< highlight bash >}}
diff --git a/content/handling_data/data_transfer/cyberduck.md b/content/handling_data/data_transfer/cyberduck.md
index 0770749b..7e11033a 100644
--- a/content/handling_data/data_transfer/cyberduck.md
+++ b/content/handling_data/data_transfer/cyberduck.md
@@ -34,7 +34,7 @@ To add an HCC machine, in the bookmarks pane click the "+" icon:
 {{< figure src="/images/7274500.png" height="450" >}}
 
 Ensure the type of connection is SFTP.  Enter the hostname of the machine 
-you wish to connect to (crane.unl.edu, rhino.unl.edu) in the **Server**
+you wish to connect to (crane.unl.edu, swan.unl.edu) in the **Server**
 field, and your HCC username in the **Username** field.  The
 **Nickname** field is arbitrary, so enter whatever you prefer.
 
diff --git a/content/handling_data/data_transfer/globus_connect/_index.md b/content/handling_data/data_transfer/globus_connect/_index.md
index 56ced75f..fd772688 100644
--- a/content/handling_data/data_transfer/globus_connect/_index.md
+++ b/content/handling_data/data_transfer/globus_connect/_index.md
@@ -8,7 +8,7 @@ weight = 5
 a fast and robust file transfer service that allows users to quickly
 move large amounts of data between computer clusters and even to and
 from personal workstations.  This service has been made available for
-Crane, Rhino, and Attic. HCC users are encouraged to use Globus
+Crane, Swan, and Attic. HCC users are encouraged to use Globus
 Connect for their larger data transfers as an alternative to slower and
 more error-prone methods such as scp and winSCP. 
 
@@ -16,7 +16,7 @@ more error-prone methods such as scp and winSCP. 
 
 ### Globus Connect Advantages
 
--   Dedicated transfer servers on Crane, Rhino, and Attic allow
+-   Dedicated transfer servers on Crane, Swan, and Attic allow
     large amounts of data to be transferred quickly between sites.
 
 -   A user can install Globus Connect Personal on his or her workstation
diff --git a/content/handling_data/data_transfer/globus_connect/activating_hcc_cluster_endpoints.md b/content/handling_data/data_transfer/globus_connect/activating_hcc_cluster_endpoints.md
index 47dfc22e..2c0d5234 100644
--- a/content/handling_data/data_transfer/globus_connect/activating_hcc_cluster_endpoints.md
+++ b/content/handling_data/data_transfer/globus_connect/activating_hcc_cluster_endpoints.md
@@ -4,13 +4,13 @@ description = "How to activate HCC endpoints on Globus"
 weight = 20
 +++
 
-You will not be able to transfer files to or from an HCC endpoint using Globus Connect without first activating the endpoint.  Endpoints are available for Crane (`hcc#crane`), Rhino, (`hcc#rhino`), and Attic (`hcc#attic`).  Follow the instructions below to activate any of these endpoints and begin making transfers.
+You will not be able to transfer files to or from an HCC endpoint using Globus Connect without first activating the endpoint.  Endpoints are available for Crane (`hcc#crane`), Swan (`hcc#swan`), and Attic (`hcc#attic`).  Follow the instructions below to activate any of these endpoints and begin making transfers.
 
 1.  [Sign in](https://app.globus.org) to your Globus account using your campus credentials or your Globus ID (if you have one).  Then click on 'Endpoints' in the left sidebar.  
 {{< figure src="/images/Glogin.png" >}}    
 {{< figure src="/images/endpoints.png" >}}
 
-2.  Find the endpoint you want by entering '`hcc#crane`', '`hcc#rhino`', or '`hcc#attic`' in the search box and hit 'enter'.  Once you have found and selected the endpoint, click the green 'activate' icon. On the following page, click 'continue'.
+2.  Find the endpoint you want by entering '`hcc#crane`', '`hcc#swan`', or '`hcc#attic`' in the search box and hit 'enter'.  Once you have found and selected the endpoint, click the green 'activate' icon. On the following page, click 'continue'.
 {{< figure src="/images/activateEndpoint.png" >}}
 {{< figure src="/images/EndpointContinue.png" >}}
 
diff --git a/content/handling_data/data_transfer/globus_connect/file_sharing.md b/content/handling_data/data_transfer/globus_connect/file_sharing.md
index c070782f..5f51c00d 100644
--- a/content/handling_data/data_transfer/globus_connect/file_sharing.md
+++ b/content/handling_data/data_transfer/globus_connect/file_sharing.md
@@ -5,7 +5,7 @@ weight = 50
 +++
 
 If you would like another colleague or researcher to have access to your
-data, you may create a shared endpoint on Crane, Rhino, or Attic. You can personally manage access to this endpoint and
+data, you may create a shared endpoint on Crane, Swan, or Attic. You can personally manage access to this endpoint and
 give access to anybody with a Globus account (whether or not
 they have an HCC account).  *Please use this feature responsibly by
 sharing only what is necessary and granting access only to trusted
diff --git a/content/handling_data/data_transfer/globus_connect/file_transfers_between_endpoints.md b/content/handling_data/data_transfer/globus_connect/file_transfers_between_endpoints.md
index c5d7eaa9..9e58f6ca 100644
--- a/content/handling_data/data_transfer/globus_connect/file_transfers_between_endpoints.md
+++ b/content/handling_data/data_transfer/globus_connect/file_transfers_between_endpoints.md
@@ -7,7 +7,7 @@ weight = 30
 To transfer files between HCC clusters, you will first need to
 [activate]({{< relref "/handling_data/data_transfer/globus_connect/activating_hcc_cluster_endpoints" >}}) the
 two endpoints you would like to use (the available endpoints
-are: `hcc#crane` `hcc#rhino`, and `hcc#attic`).  Once
+are: `hcc#crane`, `hcc#swan`, and `hcc#attic`).  Once
 that has been completed, follow the steps below to begin transferring
 files.  (Note: You can also transfer files between an HCC endpoint and
 any other Globus endpoint for which you have authorized access.  That
diff --git a/content/handling_data/data_transfer/globus_connect/file_transfers_to_and_from_personal_workstations.md b/content/handling_data/data_transfer/globus_connect/file_transfers_to_and_from_personal_workstations.md
index 79abd640..5fec3034 100644
--- a/content/handling_data/data_transfer/globus_connect/file_transfers_to_and_from_personal_workstations.md
+++ b/content/handling_data/data_transfer/globus_connect/file_transfers_to_and_from_personal_workstations.md
@@ -28,7 +28,7 @@ endpoints.
      From your Globus account, select the 'File Manager' tab 
 	from the left sidebar and enter the name of your new endpoint the 'Collection' text box. Press 'Enter' and then
     navigate to the appropriate directory. Select "Transfer of Sync to.." from the right sidebar (or select the "two panels"
-	icon from the top right corner) and Enter the second endpoint (for example: `hcc#crane`, `hcc#rhino`, or `hcc#attic`),
+	icon from the top right corner) and enter the second endpoint (for example: `hcc#crane`, `hcc#swan`, or `hcc#attic`),
 	type or navigate to the desired directory, and initiate the file transfer by clicking on the blue
     arrow button.
 {{< figure src="/images/PersonalTransfer.png" >}}  
diff --git a/content/handling_data/data_transfer/globus_connect/globus_command_line_interface.md b/content/handling_data/data_transfer/globus_connect/globus_command_line_interface.md
index c64a6271..c39d0db8 100644
--- a/content/handling_data/data_transfer/globus_connect/globus_command_line_interface.md
+++ b/content/handling_data/data_transfer/globus_connect/globus_command_line_interface.md
@@ -112,8 +112,8 @@ command:
 All transfers must take place between Globus endpoints. Even if you are
 transferring from an endpoint that you are already connected to, that
 endpoint must be activated in Globus. Here, we are transferring between
-Crane and Rhino. We have activated the Crane endpoint and saved its
-UUID to the variable `$tusker` as we did for `$crane` above.
+Crane and Swan. We have activated the Swan endpoint and saved its
+UUID to the variable `$swan` as we did for `$crane` above.
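+
+As a sketch, the Swan endpoint UUID can be captured in a shell variable in the
+same spirit (the search filter and `--jq` expression here are illustrative,
+not necessarily the exact command used for `$crane`):
+
+{{< highlight bash >}}
+$ swan=$(globus endpoint search "hcc#swan" --jq "DATA[0].id" --format unix)
+{{< /highlight >}}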
 
 To transfer files, we use the command `globus transfer`. The format of
 this command is `globus transfer <endpoint1>:<file_path>
diff --git a/content/handling_data/data_transfer/high_speed_data_transfers.md b/content/handling_data/data_transfer/high_speed_data_transfers.md
index 07c5161a..a15c52e1 100644
--- a/content/handling_data/data_transfer/high_speed_data_transfers.md
+++ b/content/handling_data/data_transfer/high_speed_data_transfers.md
@@ -4,7 +4,7 @@ description = "How to transfer files directly from the transfer servers"
 weight = 40
 +++
 
-Crane, Rhino, and Attic each have a dedicated transfer server with
+Crane, Swan, and Attic each have a dedicated transfer server with
 10 Gb/s connectivity that allows
 for faster data transfers than the login nodes.  With [Globus
 Connect]({{< relref "globus_connect" >}}), users
@@ -18,7 +18,7 @@ using these dedicated servers for data transfers:
 Cluster   | Transfer server
 ----------|----------------------
 Crane     | `crane-xfer.unl.edu`
-Rhino     | `rhino-xfer.unl.edu`
+Swan      | `swan-xfer.unl.edu`
 Attic     | `attic-xfer.unl.edu`
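+
+For example, assuming the transfer servers accept standard scp connections, a
+large file can be copied through the Swan transfer server rather than the
+login node (the file name and remote path are illustrative):
+
+{{< highlight bash >}}
+$ scp large_dataset.tar.gz <username>@swan-xfer.unl.edu:/work/<group>/<username>/
+{{< /highlight >}}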
 
 {{% notice info %}}
diff --git a/content/handling_data/data_transfer/winscp.md b/content/handling_data/data_transfer/winscp.md
index 4d878040..17dd4818 100644
--- a/content/handling_data/data_transfer/winscp.md
+++ b/content/handling_data/data_transfer/winscp.md
@@ -13,8 +13,8 @@ and the HCC supercomputers through a Graphic User Interface (GUI).
 Download and install the third party application **WinSCP**
 to connect the file systems between your personal computer and the HCC supercomputers. 
 Below is a step-by-step installation guide. Here we use the HCC cluster **Crane**
-for demonstration. To use the **Rhino** cluster, replace `crane.unl.edu`
-with `rhino.unl.edu`.
+for demonstration. To use the **Swan** cluster, replace `crane.unl.edu`
+with `swan.unl.edu`.
 
 1.  On the first screen, type `crane.unl.edu` for Host name, enter your
     HCC account username and password for User name and Password. Then
diff --git a/content/open_ondemand/connecting_to_hcc_ondemand.md b/content/open_ondemand/connecting_to_hcc_ondemand.md
index 202601ff..78345d82 100644
--- a/content/open_ondemand/connecting_to_hcc_ondemand.md
+++ b/content/open_ondemand/connecting_to_hcc_ondemand.md
@@ -7,7 +7,7 @@ weight=10
 To access HCC’s instance of Open OnDemand, use one of the following links.
 
 - For Crane, visit:  [https://crane-ood.unl.edu](https://crane-ood.unl.edu)
-- For Rhino, visit:  [https://rhino-ood.unl.edu](https://rhino-ood.unl.edu)
+- For Swan, visit:  [https://swan-ood.unl.edu](https://swan-ood.unl.edu)
 
 Log in with your HCC username, password, and Duo credentials.
 
diff --git a/content/submitting_jobs/_index.md b/content/submitting_jobs/_index.md
index 10d15454..2ddce693 100644
--- a/content/submitting_jobs/_index.md
+++ b/content/submitting_jobs/_index.md
@@ -4,7 +4,7 @@ description =  "How to submit jobs to HCC resources"
 weight = "50"
 +++
 
-Crane and Rhino are managed by
+Crane and Swan are managed by
 the [SLURM](https://slurm.schedmd.com) resource manager.  
 In order to run processing on Crane, you
 must create a SLURM script that will run your processing. After
diff --git a/content/submitting_jobs/partitions/_index.md b/content/submitting_jobs/partitions/_index.md
index edd64fac..fad3ac3f 100644
--- a/content/submitting_jobs/partitions/_index.md
+++ b/content/submitting_jobs/partitions/_index.md
@@ -1,21 +1,21 @@
 +++
 title = "Available Partitions"
-description =  "Listing of partitions on Crane and Rhino."
+description =  "Listing of partitions on Crane and Swan."
 scripts = ["https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/js/jquery.tablesorter.min.js", "https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/js/widgets/widget-pager.min.js","https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/js/widgets/widget-filter.min.js","/js/sort-table.js"]
 css = ["http://mottie.github.io/tablesorter/css/theme.default.css","https://mottie.github.io/tablesorter/css/theme.dropbox.css", "https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/css/jquery.tablesorter.pager.min.css","https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/css/filter.formatter.min.css"]
 weight=70
 +++
 
-Partitions are used on Crane and Rhino to distinguish different
+Partitions are used on Crane and Swan to distinguish different
 resources. You can view the partitions with the command `sinfo`.
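+
+For example, a compact view of each partition, its time limit, and its node
+count (the format string is just one illustrative choice):
+
+{{< highlight bash >}}
+$ sinfo --format="%P %l %D"
+{{< /highlight >}}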
 
 ### Crane:
 
 [Full list for Crane]({{< relref "crane_available_partitions"  >}})
 
-### Rhino:
+### Swan:
 
-[Full list for Rhino]({{< relref "rhino_available_partitions"  >}})
+[Full list for Swan]({{< relref "swan_available_partitions"  >}})
 
 #### Priority for short jobs
 
@@ -38,7 +38,7 @@ priority so it will run as soon as possible.
 
-Overall limitations of maximum job wall time. CPUs, etc. are set for
-all jobs with the default setting (when thea "–qos=" section is omitted)
-and "short" jobs (described as above) on Crane and Rhino.
+Overall limitations on maximum job wall time, CPUs, etc. are set for
+all jobs with the default setting (when the "--qos=" option is omitted)
+and "short" jobs (described above) on Crane and Swan.
 The limitations are shown in the following form.
 
 |         | SLURM Specification  | Max Job Run Time | Max CPUs per User | Max Jobs per User |
@@ -93,7 +93,7 @@ to owned resources, this method is recommended to maximize job throughput.
 ### Guest Partition
 
 The `guest` partition can be used by users and groups that do not own
-dedicated resources on Crane or Rhino.  Jobs running in the `guest` partition
+dedicated resources on Crane or Swan.  Jobs running in the `guest` partition
 will run on the owned resources with Intel OPA interconnect.  The jobs
 are preempted when the resources are needed by the resource owners and
 are restarted on another node.
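+
+A minimal submit-script sketch that targets the guest partition (the time,
+memory, and command are illustrative placeholders):
+
+{{< highlight bash >}}
+#!/bin/bash
+#SBATCH --partition=guest
+#SBATCH --time=01:00:00
+#SBATCH --mem=4G
+#SBATCH --job-name=guest-example
+
+srun hostname
+{{< /highlight >}}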
diff --git a/content/submitting_jobs/partitions/rhino_available_partitions.md b/content/submitting_jobs/partitions/swan_available_partitions.md
similarity index 77%
rename from content/submitting_jobs/partitions/rhino_available_partitions.md
rename to content/submitting_jobs/partitions/swan_available_partitions.md
index 0f59087b..d02dc1e9 100644
--- a/content/submitting_jobs/partitions/rhino_available_partitions.md
+++ b/content/submitting_jobs/partitions/swan_available_partitions.md
@@ -1,10 +1,10 @@
 +++
-title = "Available Partitions for Rhino"
-description = "List of available partitions for rhino.unl.edu."
+title = "Available Partitions for Swan"
+description = "List of available partitions for swan.unl.edu."
 scripts = ["https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/js/jquery.tablesorter.min.js", "https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/js/widgets/widget-pager.min.js","https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/js/widgets/widget-filter.min.js","/js/sort-table.js"]
 css = ["http://mottie.github.io/tablesorter/css/theme.default.css","https://mottie.github.io/tablesorter/css/theme.dropbox.css", "https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/css/jquery.tablesorter.pager.min.css","https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/css/filter.formatter.min.css"]
 +++
 
-### Rhino:
+### Swan:
 
-{{< table url="http://rhino-head.unl.edu:8192/slurm/partitions/json" >}}
+{{< table url="http://swan-head.unl.edu:8192/slurm/partitions/json" >}}
-- 
GitLab