From 6d868055a05e9edf8b95aef1074c73cd53ed3f3d Mon Sep 17 00:00:00 2001 From: Carrie Brown <cbrown58@unl.edu> Date: Fri, 6 Dec 2019 20:04:31 +0000 Subject: [PATCH] clean up of reorganization efforts --- content/Accounts/_index.md | 9 - content/Accounts/requesting_an_account.md | 10 - content/Anvil/_index.md | 196 ----------- .../Application_Specific_Guides/Jupyter.md | 58 ---- .../blast_with_allinea_performance_reports.md | 65 ---- .../ray_with_allinea_performance_reports.md | 44 --- .../blast/running_blast_alignment.md | 124 ------- .../bioinformatics_tools/biodata_module.md | 88 ----- .../dmtcp_checkpointing.md | 138 -------- .../fortran_c_on_hcc.md | 219 ------------ .../mpi_jobs_on_hcc.md | 322 ------------------ .../using_anaconda_package_manager.md | 305 ----------------- .../linux/linux_file_permissions.md | 48 --- content/Data_Storage/_index.md | 10 - content/Data_Storage/preventing_file_loss.md | 170 --------- content/Data_Storage/using_attic.md | 105 ------ content/Data_Transfer/_index.md | 10 - .../activating_hcc_cluster_endpoints.md | 39 --- .../file_transfers_between_endpoints.md | 50 --- .../high_speed_data_transfers.md | 28 -- .../2013/HCC_Supercomputing_Symposium_2013.md | 31 -- .../Events/2013/hcc_condor_workshop_2013.md | 50 --- .../Events/2013/hcc_matlab_workshop_2013.md | 29 -- content/Events/2013/hcc_mpi_workshop_2013.md | 25 -- .../Events/2016/hcc_qiime_workshop_2016.md | 71 ---- .../2017/unk_linear_algebra_feb_28th_2017.md | 75 ---- ...unl_r_for_biologists_class_march_8_2017.md | 114 ------- content/Events/_index.md | 1 + content/FAQ/_index.md | 177 ---------- content/Guides/_index.md | 10 - content/Quickstarts/_index.md | 10 - content/Quickstarts/connecting/_index.md | 20 -- .../connecting/how_to_change_your_password.md | 95 ------ .../Quickstarts/setting_up_and_using_duo.md | 146 -------- content/Submitting_Jobs/_index.md | 197 ----------- content/Submitting_Jobs/condor_jobs_on_hcc.md | 219 ------------ .../submitting_an_openmp_job.md | 42 --- content/accounts/_index.md | 17 + .../how_to_change_your_password.md | 0 .../setting_up_and_using_duo.md | 0 content/anvil/_index.md | 0 .../{Anvil => anvil}/adding_ssh_key_pairs.md | 0 .../{Anvil => anvil}/anvil_instance_types.md | 0 content/{Anvil => anvil}/available_images.md | 0 .../connecting_to_linux_instances_from_mac.md | 0 ...necting_to_linux_instances_from_windows.md | 0 ...onnecting_to_linux_instances_using_x2go.md | 0 .../connecting_to_the_anvil_vpn.md | 0 .../connecting_to_windows_instances.md | 0 .../{Anvil => anvil}/creating_an_instance.md | 0 .../creating_and_attaching_a_volume.md | 0 .../creating_ssh_key_pairs_on_mac.md | 0 .../creating_ssh_key_pairs_on_windows.md | 0 ...rmatting_and_mounting_a_volume_in_linux.md | 0 ...atting_and_mounting_a_volume_in_windows.md | 0 .../{Anvil => anvil}/resizing_an_instance.md | 0 .../what_are_the_per_group_resource_limits.md | 0 .../{Applications => applications}/_index.md | 2 +- content/applications/app_specific/Jupyter.md | 0 content/applications/app_specific/_index.md | 9 + .../allinea_profiling_and_debugging/_index.md | 0 .../allinea_performance_reports/_index.md | 0 .../blast_with_allinea_performance_reports.md | 0 ...lammps_with_allinea_performance_reports.md | 0 .../ray_with_allinea_performance_reports.md | 0 ...using_allinea_forge_via_reverse_connect.md | 0 .../bioinformatics_tools/_index.md | 0 .../alignment_tools/_index.md | 0 .../alignment_tools/blast/_index.md | 0 .../blast/create_local_blast_database.md | 0 .../blast/running_blast_alignment.md | 0 
.../alignment_tools/blat.md | 0 .../alignment_tools/bowtie.md | 0 .../alignment_tools/bowtie2.md | 0 .../alignment_tools/bwa/_index.md | 0 .../bwa/running_bwa_commands.md | 0 .../alignment_tools/clustal_omega.md | 0 .../alignment_tools/tophat_tophat2.md | 0 .../bioinformatics_tools/biodata_module.md | 0 .../data_manipulation_tools/_index.md | 0 .../bamtools/_index.md | 0 .../bamtools/running_bamtools_commands.md | 0 .../samtools/_index.md | 0 .../samtools/running_samtools_commands.md | 0 .../data_manipulation_tools/sratoolkit.md | 0 .../de_novo_assembly_tools/_index.md | 0 .../de_novo_assembly_tools/oases.md | 0 .../de_novo_assembly_tools/ray.md | 0 .../de_novo_assembly_tools/soapdenovo2.md | 0 .../de_novo_assembly_tools/trinity/_index.md | 0 .../running_trinity_in_multiple_steps.md | 0 .../de_novo_assembly_tools/velvet/_index.md | 0 .../running_velvet_with_paired_end_data.md | 0 ...vet_with_single_end_and_paired_end_data.md | 0 .../running_velvet_with_single_end_data.md | 0 .../downloading_sra_data_from_ncbi.md | 0 .../pre_processing_tools/_index.md | 0 .../pre_processing_tools/cutadapt.md | 0 .../pre_processing_tools/prinseq.md | 0 .../pre_processing_tools/scythe.md | 0 .../pre_processing_tools/sickle.md | 0 .../pre_processing_tools/tagcleaner.md | 0 .../bioinformatics_tools/qiime.md | 0 .../reference_based_assembly_tools/_index.md | 0 .../cufflinks.md | 0 .../_index.md | 0 .../cap3.md | 0 .../cd_hit.md | 0 .../app_specific/dmtcp_checkpointing.md | 0 .../app_specific/fortran_c_on_hcc.md | 0 .../app_specific/mpi_jobs_on_hcc.md | 0 .../app_specific}/running_gaussian_at_hcc.md | 0 .../running_matlab_parallel_server.md | 0 ...unning_ocean_land_atmosphere_model_olam.md | 0 .../app_specific}/running_theano.md | 0 .../modules/_index.md} | 10 +- .../modules}/available_software_for_crane.md | 0 .../modules}/available_software_for_rhino.md | 0 content/applications/user_software/_index.md | 29 ++ .../compiling_an_openmp_application.md | 0 .../user_software}/installing_perl_modules.md | 0 .../using_anaconda_package_manager.md | 0 .../user_software}/using_singularity.md | 0 content/{Connecting => connecting}/_index.md | 0 .../basic_linux_commands.md | 0 .../connecting/for_maclinux_users.md | 38 +-- .../connecting/for_windows_users.md | 71 ++-- .../how_to_setup_x11_forwarding.md | 0 .../connecting/mobaxterm_windows.md | 15 +- ...reusing_ssh_connections_in_linux_or_mac.md | 0 content/{Contact_Us => contact_us}/_index.md | 0 .../_index.md} | 62 ++-- content/handling_data/data_storage/_index.md | 129 +++++++ .../data_storage}/data_for_unmc_users_only.md | 0 .../data_storage}/integrating_box_with_hcc.md | 0 .../data_storage/linux_file_permissions.md | 0 .../data_storage/preventing_file_loss.md | 0 .../handling_data/data_storage/using_attic.md | 0 .../using_nus_gitlab_instance/_index.md | 0 .../setting_up_gitlab_on_hcc_clusters.md | 0 .../using_the_common_file_system.md | 0 content/handling_data/data_transfer/_index.md | 20 ++ .../data_transfer}/connect_to_cb3_irods.md | 0 .../data_transfer}/globus_connect/_index.md | 0 .../activating_hcc_cluster_endpoints.md | 0 .../globus_connect/creating_globus_groups.md | 0 .../globus_connect/file_sharing.md | 0 .../file_transfers_between_endpoints.md | 0 ...sfers_to_and_from_personal_workstations.md | 0 .../globus_command_line_interface.md | 0 .../high_speed_data_transfers.md | 0 .../data_transfer}/using_rclone_with_hcc.md | 0 content/intro/_index.md | 42 +++ .../_index.md} | 51 +-- .../submitting_jobs/app_specific/_index.md | 9 + 
.../app_specific/submitting_an_openmp_job.md | 0 .../app_specific}/submitting_ansys_jobs.md | 2 +- .../submitting_cuda_or_openacc_jobs.md | 2 +- .../app_specific}/submitting_matlab_jobs.md | 0 .../app_specific}/submitting_r_jobs.md | 0 .../creating_an_interactive_job.md} | 3 +- .../hcc_acknowledgment_credit.md | 1 + .../job_dependencies.md | 1 + .../monitoring_jobs.md | 1 + .../partitions/_index.md | 3 +- .../partitions/crane_available_partitions.md | 0 .../partitions/rhino_available_partitions.md | 0 .../sandstone.md} | 102 +++--- .../submitting_a_job_array.md | 1 + .../submitting_an_mpi_job.md | 1 + .../submitting_htcondor_jobs.md | 1 + 171 files changed, 449 insertions(+), 3523 deletions(-) delete mode 100755 content/Accounts/_index.md delete mode 100755 content/Accounts/requesting_an_account.md delete mode 100644 content/Anvil/_index.md delete mode 100644 content/Applications/Application_Specific_Guides/Jupyter.md delete mode 100644 content/Applications/Application_Specific_Guides/allinea_profiling_and_debugging/allinea_performance_reports/blast_with_allinea_performance_reports.md delete mode 100644 content/Applications/Application_Specific_Guides/allinea_profiling_and_debugging/allinea_performance_reports/ray_with_allinea_performance_reports.md delete mode 100644 content/Applications/Application_Specific_Guides/bioinformatics_tools/alignment_tools/blast/running_blast_alignment.md delete mode 100644 content/Applications/Application_Specific_Guides/bioinformatics_tools/biodata_module.md delete mode 100644 content/Applications/Application_Specific_Guides/dmtcp_checkpointing.md delete mode 100644 content/Applications/Application_Specific_Guides/fortran_c_on_hcc.md delete mode 100644 content/Applications/Application_Specific_Guides/mpi_jobs_on_hcc.md delete mode 100644 content/Applications/Using_Your_Own_Software/using_anaconda_package_manager.md delete mode 100644 content/Connecting/linux/linux_file_permissions.md delete mode 100644 content/Data_Storage/_index.md delete mode 100644 content/Data_Storage/preventing_file_loss.md delete mode 100644 content/Data_Storage/using_attic.md delete mode 100644 content/Data_Transfer/_index.md delete mode 100644 content/Data_Transfer/globus_connect/activating_hcc_cluster_endpoints.md delete mode 100644 content/Data_Transfer/globus_connect/file_transfers_between_endpoints.md delete mode 100644 content/Data_Transfer/high_speed_data_transfers.md delete mode 100644 content/Guides/_index.md delete mode 100755 content/Quickstarts/_index.md delete mode 100644 content/Quickstarts/connecting/_index.md delete mode 100755 content/Quickstarts/connecting/how_to_change_your_password.md delete mode 100755 content/Quickstarts/setting_up_and_using_duo.md delete mode 100644 content/Submitting_Jobs/_index.md delete mode 100644 content/Submitting_Jobs/condor_jobs_on_hcc.md delete mode 100644 content/Submitting_Jobs/submitting_an_openmp_job.md create mode 100644 content/accounts/_index.md rename content/{Accounts => accounts}/how_to_change_your_password.md (100%) mode change 100755 => 100644 rename content/{Accounts => accounts}/setting_up_and_using_duo.md (100%) mode change 100755 => 100644 create mode 100644 content/anvil/_index.md rename content/{Anvil => anvil}/adding_ssh_key_pairs.md (100%) rename content/{Anvil => anvil}/anvil_instance_types.md (100%) rename content/{Anvil => anvil}/available_images.md (100%) rename content/{Anvil => anvil}/connecting_to_linux_instances_from_mac.md (100%) rename content/{Anvil => anvil}/connecting_to_linux_instances_from_windows.md 
(100%) rename content/{Anvil => anvil}/connecting_to_linux_instances_using_x2go.md (100%) rename content/{Anvil => anvil}/connecting_to_the_anvil_vpn.md (100%) rename content/{Anvil => anvil}/connecting_to_windows_instances.md (100%) rename content/{Anvil => anvil}/creating_an_instance.md (100%) rename content/{Anvil => anvil}/creating_and_attaching_a_volume.md (100%) rename content/{Anvil => anvil}/creating_ssh_key_pairs_on_mac.md (100%) rename content/{Anvil => anvil}/creating_ssh_key_pairs_on_windows.md (100%) rename content/{Anvil => anvil}/formatting_and_mounting_a_volume_in_linux.md (100%) rename content/{Anvil => anvil}/formatting_and_mounting_a_volume_in_windows.md (100%) rename content/{Anvil => anvil}/resizing_an_instance.md (100%) rename content/{Anvil => anvil}/what_are_the_per_group_resource_limits.md (100%) rename content/{Applications => applications}/_index.md (83%) create mode 100644 content/applications/app_specific/Jupyter.md create mode 100644 content/applications/app_specific/_index.md rename content/{Applications/Application_Specific_Guides => applications/app_specific}/allinea_profiling_and_debugging/_index.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/allinea_profiling_and_debugging/allinea_performance_reports/_index.md (100%) create mode 100644 content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/blast_with_allinea_performance_reports.md rename content/{Applications/Application_Specific_Guides => applications/app_specific}/allinea_profiling_and_debugging/allinea_performance_reports/lammps_with_allinea_performance_reports.md (100%) create mode 100644 content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/ray_with_allinea_performance_reports.md rename content/{Applications/Application_Specific_Guides => applications/app_specific}/allinea_profiling_and_debugging/using_allinea_forge_via_reverse_connect.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/_index.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/alignment_tools/_index.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/alignment_tools/blast/_index.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/alignment_tools/blast/create_local_blast_database.md (100%) create mode 100644 content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/running_blast_alignment.md rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/alignment_tools/blat.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/alignment_tools/bowtie.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/alignment_tools/bowtie2.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/alignment_tools/bwa/_index.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/alignment_tools/bwa/running_bwa_commands.md (100%) rename content/{Applications/Application_Specific_Guides => 
applications/app_specific}/bioinformatics_tools/alignment_tools/clustal_omega.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/alignment_tools/tophat_tophat2.md (100%) create mode 100644 content/applications/app_specific/bioinformatics_tools/biodata_module.md rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/data_manipulation_tools/_index.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/data_manipulation_tools/bamtools/_index.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/data_manipulation_tools/bamtools/running_bamtools_commands.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/data_manipulation_tools/samtools/_index.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/data_manipulation_tools/samtools/running_samtools_commands.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/data_manipulation_tools/sratoolkit.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/de_novo_assembly_tools/_index.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/de_novo_assembly_tools/oases.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/de_novo_assembly_tools/ray.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/de_novo_assembly_tools/soapdenovo2.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/de_novo_assembly_tools/trinity/_index.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/de_novo_assembly_tools/trinity/running_trinity_in_multiple_steps.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/de_novo_assembly_tools/velvet/_index.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_paired_end_data.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_single_end_and_paired_end_data.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_single_end_data.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/downloading_sra_data_from_ncbi.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/pre_processing_tools/_index.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/pre_processing_tools/cutadapt.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/pre_processing_tools/prinseq.md (100%) rename 
content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/pre_processing_tools/scythe.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/pre_processing_tools/sickle.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/pre_processing_tools/tagcleaner.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/qiime.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/reference_based_assembly_tools/_index.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/reference_based_assembly_tools/cufflinks.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/removing_detecting_redundant_sequences/_index.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/removing_detecting_redundant_sequences/cap3.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/bioinformatics_tools/removing_detecting_redundant_sequences/cd_hit.md (100%) create mode 100644 content/applications/app_specific/dmtcp_checkpointing.md create mode 100644 content/applications/app_specific/fortran_c_on_hcc.md create mode 100644 content/applications/app_specific/mpi_jobs_on_hcc.md rename content/{Applications/Application_Specific_Guides => applications/app_specific}/running_gaussian_at_hcc.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/running_matlab_parallel_server.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/running_ocean_land_atmosphere_model_olam.md (100%) rename content/{Applications/Application_Specific_Guides => applications/app_specific}/running_theano.md (100%) rename content/{Applications/Using_Modules/module_commands.md => applications/modules/_index.md} (95%) rename content/{Applications/Using_Modules => applications/modules}/available_software_for_crane.md (100%) rename content/{Applications/Using_Modules => applications/modules}/available_software_for_rhino.md (100%) create mode 100644 content/applications/user_software/_index.md rename content/{Applications/Using_Your_Own_Software => applications/user_software}/compiling_an_openmp_application.md (100%) rename content/{Applications/Using_Your_Own_Software => applications/user_software}/installing_perl_modules.md (100%) create mode 100644 content/applications/user_software/using_anaconda_package_manager.md rename content/{Applications/Using_Your_Own_Software => applications/user_software}/using_singularity.md (100%) rename content/{Connecting => connecting}/_index.md (100%) rename content/{Connecting/linux => connecting}/basic_linux_commands.md (100%) rename content/{Quickstarts => }/connecting/for_maclinux_users.md (74%) rename content/{Quickstarts => }/connecting/for_windows_users.md (72%) rename content/{Connecting => connecting}/how_to_setup_x11_forwarding.md (100%) rename content/{Quickstarts => }/connecting/mobaxterm_windows.md (85%) rename content/{Connecting => connecting}/reusing_ssh_connections_in_linux_or_mac.md (100%) rename content/{Contact_Us => contact_us}/_index.md (100%) rename content/{Data_Storage/data_storage_overview.md => 
handling_data/_index.md} (65%) create mode 100644 content/handling_data/data_storage/_index.md rename content/{Data_Storage => handling_data/data_storage}/data_for_unmc_users_only.md (100%) rename content/{Data_Storage => handling_data/data_storage}/integrating_box_with_hcc.md (100%) create mode 100644 content/handling_data/data_storage/linux_file_permissions.md create mode 100644 content/handling_data/data_storage/preventing_file_loss.md create mode 100644 content/handling_data/data_storage/using_attic.md rename content/{Data_Storage => handling_data/data_storage}/using_nus_gitlab_instance/_index.md (100%) rename content/{Data_Storage => handling_data/data_storage}/using_nus_gitlab_instance/setting_up_gitlab_on_hcc_clusters.md (100%) rename content/{Data_Storage => handling_data/data_storage}/using_the_common_file_system.md (100%) create mode 100644 content/handling_data/data_transfer/_index.md rename content/{Data_Transfer => handling_data/data_transfer}/connect_to_cb3_irods.md (100%) rename content/{Data_Transfer => handling_data/data_transfer}/globus_connect/_index.md (100%) create mode 100644 content/handling_data/data_transfer/globus_connect/activating_hcc_cluster_endpoints.md rename content/{Data_Transfer => handling_data/data_transfer}/globus_connect/creating_globus_groups.md (100%) rename content/{Data_Transfer => handling_data/data_transfer}/globus_connect/file_sharing.md (100%) create mode 100644 content/handling_data/data_transfer/globus_connect/file_transfers_between_endpoints.md rename content/{Data_Transfer => handling_data/data_transfer}/globus_connect/file_transfers_to_and_from_personal_workstations.md (100%) rename content/{Data_Transfer => handling_data/data_transfer}/globus_connect/globus_command_line_interface.md (100%) create mode 100644 content/handling_data/data_transfer/high_speed_data_transfers.md rename content/{Data_Transfer => handling_data/data_transfer}/using_rclone_with_hcc.md (100%) mode change 100755 => 100644 create mode 100644 content/intro/_index.md rename content/{Quickstarts/submitting_jobs.md => submitting_jobs/_index.md} (79%) create mode 100644 content/submitting_jobs/app_specific/_index.md create mode 100644 content/submitting_jobs/app_specific/submitting_an_openmp_job.md rename content/{Submitting_Jobs => submitting_jobs/app_specific}/submitting_ansys_jobs.md (94%) rename content/{Submitting_Jobs => submitting_jobs/app_specific}/submitting_cuda_or_openacc_jobs.md (98%) rename content/{Submitting_Jobs => submitting_jobs/app_specific}/submitting_matlab_jobs.md (100%) rename content/{Submitting_Jobs => submitting_jobs/app_specific}/submitting_r_jobs.md (100%) rename content/{Submitting_Jobs/submitting_an_interactive_job.md => submitting_jobs/creating_an_interactive_job.md} (96%) rename content/{Submitting_Jobs => submitting_jobs}/hcc_acknowledgment_credit.md (99%) rename content/{Submitting_Jobs => submitting_jobs}/job_dependencies.md (99%) rename content/{Submitting_Jobs => submitting_jobs}/monitoring_jobs.md (99%) rename content/{Submitting_Jobs => submitting_jobs}/partitions/_index.md (99%) rename content/{Submitting_Jobs => submitting_jobs}/partitions/crane_available_partitions.md (100%) rename content/{Submitting_Jobs => submitting_jobs}/partitions/rhino_available_partitions.md (100%) rename content/{Guides/sandstone/_index.md => submitting_jobs/sandstone.md} (96%) rename content/{Submitting_Jobs => submitting_jobs}/submitting_a_job_array.md (99%) rename content/{Submitting_Jobs => submitting_jobs}/submitting_an_mpi_job.md (99%) rename 
content/{Submitting_Jobs => submitting_jobs}/submitting_htcondor_jobs.md (99%)
diff --git a/content/Accounts/_index.md b/content/Accounts/_index.md
deleted file mode 100755
index d8ac864c..00000000
--- a/content/Accounts/_index.md
+++ /dev/null
@@ -1,9 +0,0 @@
-+++
-title = "Accounts"
-weight = "20"
-+++
-
-Guides on basic account operations.
--------------------------------------
-
-{{% children description="true" %}}
diff --git a/content/Accounts/requesting_an_account.md b/content/Accounts/requesting_an_account.md
deleted file mode 100755
index d2f1201d..00000000
--- a/content/Accounts/requesting_an_account.md
+++ /dev/null
@@ -1,10 +0,0 @@
-+++
-title = "Quickstarts"
-weight = "10"
-+++
-
-The quick start guides require that you already have an HCC account. You
-can get an HCC account by applying on the
-[HCC website](http://hcc.unl.edu/newusers/).
-
-{{% children %}}
diff --git a/content/Anvil/_index.md b/content/Anvil/_index.md
deleted file mode 100644
index a2b7b3f2..00000000
--- a/content/Anvil/_index.md
+++ /dev/null
@@ -1,196 +0,0 @@
-+++
-title = "Anvil: HCC's Cloud"
-description = "How to use Anvil, HCC's OpenStack-based cloud resource"
-weight = "60"
-+++
-
-- [Overview](#overview)
-- [Cloud Terms](#cloud-terms)
-- [Steps for Access](#steps-for-access)
-- [Backups](#backups)
-
-
-{{% notice tip %}}
-Have your account and are ready to go? Visit the Anvil OpenStack web
-interface at https://anvil.unl.edu.
-{{% /notice %}}
-
----
-
-### Overview
-
-Anvil is the Holland Computing Center's cloud computing resource, based
-on the [OpenStack](https://www.openstack.org) software.
-OpenStack is a free and open-source software platform for
-cloud computing. Anvil was created to address the needs of NU's
-research community that are not well served by a traditional
-batch-scheduled Linux cluster environment. Examples of use cases that
-are well suited to Anvil include:
-
-- A highly interactive environment, especially for GUI applications
-- Workloads that require root-level access, such as kernel modification
-  or virtualization work
-- Alternate operating systems, such as Windows or other distributions
-  of Linux
-- Test cluster environments for various software frameworks, such as
-  [Hadoop](http://hadoop.apache.org)
-  or [Spark](https://spark.apache.org)
-- Cluster applications that require a persistent resource, such as a
-  web or database server
-
-Using Anvil, one or more virtual machines (VMs) can easily be created
-via a user-friendly web dashboard. Created VMs are then accessible from
-HCC clusters, or from your own workstation once connected to the Anvil
-Virtual Private Network (VPN). Access is through standard means,
-typically via SSH for Linux VMs and Remote Desktop for Windows VMs.
-
-### Cloud Terms
-
-There are a few terms used within the OpenStack interface and in the
-instructions below that may be unfamiliar. The following brief
-definitions may be useful. More detailed information is available in
-the [OpenStack User Guide](http://docs.openstack.org/user-guide).
-
-- **Project**: A project is the base unit of ownership in
-  OpenStack. Resources (CPUs, RAM, storage, etc.) are allocated and
-  user accounts are associated with a project. Within Anvil, each HCC
-  research group corresponds directly to a project. Similar to
-  resource allocation on HCC clusters, the members of a group share
-  the [project's resources]({{< relref "what_are_the_per_group_resource_limits" >}}).
-
-- **Image**: An image corresponds to everything needed to create a
-  virtual machine for a specific operating system (OS), such as Linux
-  or Windows. HCC creates and maintains [basic Windows and Linux]({{< relref "available_images" >}})
-  images for convenience.
-  Users can also create their own images that can then be uploaded to
-  OpenStack and used within the project.
-
-- **Flavor**: A flavor (also known as an *instance type*) defines the
-  parameters (i.e., resources) of the virtual machine. This includes
-  things such as the number of CPUs, amount of RAM, storage, etc. There
-  are many instance types [available within Anvil]({{< relref "anvil_instance_types" >}}),
-  designed to meet a variety of needs.
-
-- **Instance**: An instance is a running virtual machine, created
-  by combining an image (the basic OS) with a flavor (resources).
-  That is, *Image + Flavor = Instance*.
-
-- **Volume**: A volume is a means for persistent storage within
-  OpenStack. When an instance is destroyed, any additional data that
-  was on the OS hard drive is lost. A volume can be thought of as
-  similar to an external hard drive. It can be attached to an
-  instance and accessed as a second drive. When the instance is
-  destroyed, data on the volume is retained. It can then be attached
-  and accessed from another instance later.
-
-### Steps for Access
-
-The guide below outlines the steps needed to begin using Anvil. Please
-note that Anvil is currently in the *beta testing* phase. While
-reasonable precautions are taken against data loss, **sole copies of
-precious or irreproducible data should not be placed or left on Anvil**.
-
-1. **Request access to Anvil**
-   Access and resources are provided on a per-group basis, similar to
-   HCC clusters. For details, please see [What are the per group
-   resource limits?]({{< relref "what_are_the_per_group_resource_limits" >}})
-   To begin using Anvil, users should fill out the short request form
-   at http://hcc.unl.edu/request-anvil-access.
-   An automated confirmation email will be sent. After the group owner
-   approves the request, an HCC staff member will follow up once access
-   is available.
-
-2. **Create SSH keys**
-   OpenStack uses SSH key pairs to identify users and control access to
-   the VMs themselves, as opposed to the traditional username/password
-   combination. SSH key pairs consist of two files, a public key and a
-   private key. The public key file can be shared freely; this file will
-   be uploaded to OpenStack and associated with your account. The
-   private key file should be treated the same as a password. **Do not
-   share your private key and always keep it in a secure location.**
-   Even if you have previously created a key pair for another purpose,
-   it's best practice to create a dedicated pair for use with Anvil.
-   The process for creating key pairs is different between Windows and
-   Mac. Follow the relevant guide below for your operating system.
-   1. [Creating SSH key pairs on Windows]({{< relref "creating_ssh_key_pairs_on_windows" >}})
-   2. [Creating SSH key pairs on Mac]({{< relref "creating_ssh_key_pairs_on_mac" >}})
-
-3. **Connect to the Anvil VPN**
-   The Anvil web portal is accessible from the Internet. On the other
-   hand, for security reasons, the Anvil instances are not generally
-   accessible from the Internet. In order to access the instances from
-   on or off campus, you will need to first be connected to the Anvil
-   VPN. Follow the instructions below to connect.
-   1. [Connecting to the Anvil VPN]({{< relref "connecting_to_the_anvil_vpn" >}})
-
-4. **Add the SSH Key Pair to your account**
-   Before creating your first instance, you'll need to associate the
-   SSH key created in step 2 with your account. Follow the guide
-   below to log in to the web dashboard and add the key pair.
-   1. [Adding SSH Key Pairs]({{< relref "adding_ssh_key_pairs" >}})
-
-5. **Create an instance**
-   Once the setup steps above are completed, you can create an
-   instance within the web dashboard. Follow the guide below to create
-   an instance.
-   1. [Creating an Instance]({{< relref "creating_an_instance" >}})
-
-6. **Connect to your instance**
-   After an instance has been created, you can connect (log in) and
-   begin to use it. Connecting is done via SSH or X2Go for Linux
-   instances and via Remote Desktop (RDP) for Windows instances.
-   Follow the relevant guide below for your instance and the type of
-   OS you're connecting from.
-   1. [Connecting to Windows Instances]({{< relref "connecting_to_windows_instances" >}})
-   2. [Connecting to Linux Instances via SSH from Mac]({{< relref "connecting_to_linux_instances_from_mac" >}})
-   3. [Connecting to Linux instances via SSH from Windows]({{< relref "connecting_to_linux_instances_from_windows" >}})
-   4. [Connecting to Linux instances using X2Go (for images with Xfce)]({{< relref "connecting_to_linux_instances_using_x2go" >}})
-
-7. **Create and attach a volume to your instance (optional)**
-   Volumes are a means within OpenStack for persistent storage. When
-   an instance is destroyed, all data that was placed on the OS hard
-   drive is lost. A volume can be thought of as similar to an external
-   hard drive. It can be attached and detached from an instance as
-   needed. Data on the volume will persist until the volume itself is
-   destroyed. Creating a volume is an optional step, but may be useful
-   in certain cases. The process of creating and attaching a volume
-   from the web dashboard is the same regardless of the type (Linux or
-   Windows) of instance it will be attached to. Once the volume is
-   attached, follow the corresponding guide for your instance's OS to
-   format and make the volume usable within your instance.
-   1. [Creating and attaching a volume]({{< relref "creating_and_attaching_a_volume" >}})
-   2. [Formatting and mounting a volume in Windows]({{< relref "formatting_and_mounting_a_volume_in_windows" >}})
-   3. [Formatting and mounting a volume in Linux]({{< relref "formatting_and_mounting_a_volume_in_linux" >}})
-
-8. **Transferring files to or from your instance (optional)**
-   Transferring files to or from an instance is similar to doing so
-   with a personal laptop or workstation. To transfer between an
-   instance and another HCC resource, both SCP and [Globus
-   Connect]({{< relref "/Data_Transfer/globus_connect" >}}) can be used. For transferring
-   between an instance and a laptop/workstation or another instance,
-   standard file sharing utilities such as Dropbox or Box can be used.
-   Globus may also be used, with one stipulation. In order to
-   transfer files between two personal endpoints, a Globus Plus
-   subscription is required. As part of HCC's Globus Provider Plan,
-   HCC can provide this on a per-user basis free of charge. If you are
-   interested in Globus Plus, please email
-   {{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu)
-   with your request and a brief explanation.
-
-### Backups
-
-HCC creates daily backups of images and volume snapshots for disaster
-recovery. All users' images, detached volumes, and volume snapshots will
-be backed up on a daily basis.
-The ephemeral disks of VMs and attached volumes will NOT be backed up.
-If you would like your attached volumes to be backed up, make a
-snapshot: go to the “Volumes” tab, click the down arrow next to the
-“Edit Volume” button for the volume you want to snapshot, and then
-select “Create Snapshot”.
-
-Please note the backup function is for disaster recovery use only. HCC
-is unable to restore single files within instances. Further, HCC's
-disaster recovery backups should not be the only source of backups for
-important data. The backup policies are subject to change without prior
-notice. To retrieve your backups, please contact HCC. If you have
-special concerns, please contact us at
-{{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu).
-
diff --git a/content/Applications/Application_Specific_Guides/Jupyter.md b/content/Applications/Application_Specific_Guides/Jupyter.md
deleted file mode 100644
index b3f5de58..00000000
--- a/content/Applications/Application_Specific_Guides/Jupyter.md
+++ /dev/null
@@ -1,58 +0,0 @@
-+++
-title = "Jupyter Notebooks on Crane"
-description = "How to access and use a Jupyter Notebook"
-weight = 20
-+++
-
-- [Connecting to Crane](#connecting-to-crane)
-- [Running Code](#running-code)
-- [Opening a Terminal](#opening-a-terminal)
-- [Using Custom Packages](#using-custom-packages)
-
-## Connecting to Crane
-
-Jupyter defines its notebooks ("Jupyter Notebooks") as an open-source
-web application that allows you to create and share documents that
-contain live code, equations, visualizations, and narrative text. Uses
-include: data cleaning and transformation, numerical simulation,
-statistical modeling, data visualization, machine learning, and much more.
-
-1. To open a Jupyter notebook, [Sign in](https://crane.unl.edu) to crane.unl.edu using your HCC credentials (NOT your
-   campus credentials).
-{{< figure src="/images/jupyterLogin.png" >}}
-
-2. Select your preferred authentication method.
-
-   {{< figure src="/images/jupyterPush.png" >}}
-
-3. Choose a job profile. Select "Notebook via SLURM Job | Small (1 core, 4GB RAM, 8 hours)" for light tasks such as debugging or small-scale testing.
-Select the other options based on your computing needs. Note that a SLURM Job will save to your "work" directory.
-
-{{< figure src="/images/jupyterjob.png" >}}
-
-## Running Code
-
-1. Select the "New" dropdown menu and select the file type you want to create.
-
-{{< figure src="/images/jupyterNew.png" >}}
-2. A new tab will open, where you can enter your code. Run your code by selecting the "play" icon.
-
-{{< figure src="/images/jupyterCode.png">}}
-
-## Opening a Terminal
-
-1. From your user home page, select "terminal" from the "New" drop-down menu.
-{{< figure src="/images/jupyterTerminal.png">}}
-2. A terminal opens in a new tab. You can enter [Linux commands]({{< relref "basic_linux_commands" >}})
-   at the prompt.
-{{< figure src="/images/jupyterTerminal2.png">}}
-
-## Using Custom Packages
-
-Many popular `python` and `R` packages are already installed and available within Jupyter Notebooks.
-However, it is possible to install custom packages to be used in notebooks by creating a custom Anaconda
-environment. Detailed information on how to create such an environment can be found at
-[Using an Anaconda Environment in a Jupyter Notebook on Crane]({{< relref "/Applications/Using_Your_Own_Software/using_anaconda_package_manager#using-an-anaconda-environment-in-a-jupyter-notebook-on-crane" >}}).
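In outline, that process pairs an Anaconda environment with the `ipykernel` package so the notebook server can find it. A minimal sketch is shown below; the environment name `mynotebookenv` and the package versions are placeholders, and the linked guide above is the authoritative reference:

{{< highlight bash >}}
# Minimal sketch (names and versions are placeholders; see the linked
# guide for the authoritative steps on Crane):
module load anaconda
conda create -n mynotebookenv python=3.6 numpy ipykernel
conda activate mynotebookenv
# Register the environment as a notebook kernel so it appears in the
# "New" menu in Jupyter:
python -m ipykernel install --user --name mynotebookenv
{{< /highlight >}}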
-
----
-
-
diff --git a/content/Applications/Application_Specific_Guides/allinea_profiling_and_debugging/allinea_performance_reports/blast_with_allinea_performance_reports.md b/content/Applications/Application_Specific_Guides/allinea_profiling_and_debugging/allinea_performance_reports/blast_with_allinea_performance_reports.md
deleted file mode 100644
index 5fd9443a..00000000
--- a/content/Applications/Application_Specific_Guides/allinea_profiling_and_debugging/allinea_performance_reports/blast_with_allinea_performance_reports.md
+++ /dev/null
@@ -1,65 +0,0 @@
-+++
-title = "BLAST with Allinea Performance Reports"
-description = "Example of how to profile BLAST using Allinea Performance Reports."
-+++
-
-A simple example of using
-[BLAST]({{< relref "/Applications/Application_Specific_Guides/bioinformatics_tools/alignment_tools/blast/running_blast_alignment" >}})
-with Allinea Performance Reports (`perf-report`) on Crane is shown below:
-
-{{% panel theme="info" header="blastn_perf_report.submit" %}}
-{{< highlight batch >}}
-#!/bin/sh
-#SBATCH --job-name=BlastN
-#SBATCH --nodes=1
-#SBATCH --ntasks-per-node=16
-#SBATCH --time=20:00:00
-#SBATCH --mem=50gb
-#SBATCH --output=BlastN.info
-#SBATCH --error=BlastN.error
-
-module load allinea
-module load blast/2.2.29
-
-cd $WORK/<project_folder>
-cp -r /work/HCC/DATA/blastdb/nt/ /tmp/
-cp input_reads.fasta /tmp/
-
-perf-report --openmp-threads=$SLURM_NTASKS_PER_NODE --nompi `which blastn` \
--query /tmp/input_reads.fasta -db /tmp/nt/nt -out \
-blastn_output.alignments -num_threads $SLURM_NTASKS_PER_NODE
-
-cp blastn_output.alignments .
-{{< /highlight >}}
-{{% /panel %}}
-
-BLAST uses OpenMP and therefore the Allinea Performance Reports options
-`--openmp-threads` and `--nompi` are used. The perf-report
-part, `perf-report --openmp-threads=$SLURM_NTASKS_PER_NODE --nompi`,
-is placed in front of the actual `blastn` command we want
-to analyze.
-
-{{% notice info %}}
-If you see the error "**Allinea Performance Reports - target file
-'application' does not exist on this machine... exiting**", this means
-that instead of just using the executable '*application*', the full path
-to that application is required. This is the reason why in the script
-above, instead of using "*blastn*", we use *\`which blastn\`*, which
-gives the full path of the *blastn* executable.
-{{% /notice %}}
-
-When the application finishes, the performance report is generated in
-the working directory.
-For the executed application, this is what the report looks like:
-
-{{< figure src="/images/11635296.png" width="850" >}}
-
-From the report, we can see that **blastn** is a compute-bound
-application. The difference between mean (11.1 GB) and peak (26.3 GB)
-memory is significant, and this may be a sign of workload imbalance or a
-memory leak. Moreover, 89.6% of the time is spent synchronizing
-threads in parallel regions, which can lead to workload imbalance.
-
-Running Allinea Performance Reports and identifying application
-bottlenecks is very useful for improving the application and making
-better use of the available resources.
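As a practical note, `perf-report` typically writes the report to the working directory in both plain-text and HTML form, with file names derived from the profiled executable, process count, and timestamp. With hypothetical file names, reviewing a run from the command line might look like:

{{< highlight bash >}}
# File names below are hypothetical; perf-report derives them from the
# executable name, the process count, and a timestamp.
ls blastn_16p*.txt blastn_16p*.html
less blastn_16p_2019-12-06_20-04.txt
{{< /highlight >}}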
diff --git a/content/Applications/Application_Specific_Guides/allinea_profiling_and_debugging/allinea_performance_reports/ray_with_allinea_performance_reports.md b/content/Applications/Application_Specific_Guides/allinea_profiling_and_debugging/allinea_performance_reports/ray_with_allinea_performance_reports.md
deleted file mode 100644
index de4dc0d5..00000000
--- a/content/Applications/Application_Specific_Guides/allinea_profiling_and_debugging/allinea_performance_reports/ray_with_allinea_performance_reports.md
+++ /dev/null
@@ -1,44 +0,0 @@
-+++
-title = "Ray with Allinea Performance Reports"
-description = "Example of how to profile Ray using Allinea Performance Reports"
-+++
-
-A simple example of using [Ray]({{< relref "/Applications/Application_Specific_Guides/bioinformatics_tools/de_novo_assembly_tools/ray" >}})
-with Allinea Performance Reports (`perf-report`) on Tusker is shown below:
-
-{{% panel theme="info" header="ray_perf_report.submit" %}}
-{{< highlight batch >}}
-#!/bin/sh
-#SBATCH --job-name=Ray
-#SBATCH --ntasks-per-node=16
-#SBATCH --time=10:00:00
-#SBATCH --mem=70gb
-#SBATCH --output=Ray.info
-#SBATCH --error=Ray.error
-
-module load allinea
-module load compiler/gcc/4.7 openmpi/1.6 ray/2.3
-
-perf-report mpiexec -n 16 Ray -k 31 -p input_reads_pair_1.fasta input_reads_pair_2.fasta -o output_directory
-{{< /highlight >}}
-{{% /panel %}}
-
-Ray is an MPI application and therefore no additional Allinea
-Performance Reports options are required. The `perf-report` command is
-placed in front of the actual `Ray` command we want to analyze.
-
-When the application finishes, the performance report is generated in
-the working directory.
-For the executed application, this is what the report looks like:
-
-{{< figure src="/images/11635303.png" width="850" >}}
-
-From the report, we can see that **Ray** is a compute-bound application.
-Most of the running time is spent in point-to-point calls with a low
-transfer rate, which may be caused by inefficient message sizes.
-Therefore, running this application with fewer MPI processes and more
-data on each process may be more efficient.
-
-Running Allinea Performance Reports and identifying application
-bottlenecks is very useful for improving the application and making
-better use of the available resources.
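Acting on that suggestion only requires changing the process count passed to `mpiexec` and reducing the corresponding SLURM request; for example, a hypothetical re-run with half as many processes:

{{< highlight bash >}}
# Hypothetical re-run with 8 MPI processes instead of 16; reduce
# --ntasks-per-node in the submit script to match.
perf-report mpiexec -n 8 Ray -k 31 -p input_reads_pair_1.fasta input_reads_pair_2.fasta -o output_directory_8p
{{< /highlight >}}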
diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/alignment_tools/blast/running_blast_alignment.md b/content/Applications/Application_Specific_Guides/bioinformatics_tools/alignment_tools/blast/running_blast_alignment.md
deleted file mode 100644
index e9345267..00000000
--- a/content/Applications/Application_Specific_Guides/bioinformatics_tools/alignment_tools/blast/running_blast_alignment.md
+++ /dev/null
@@ -1,124 +0,0 @@
-+++
-title = "Running BLAST Alignment"
-description = "How to run BLAST alignment on HCC resources"
-weight = "10"
-+++
-
-
-Basic BLAST has the following commands:
-
-- **blastn**: search a nucleotide database using a nucleotide query
-- **blastp**: search a protein database using a protein query
-- **blastx**: search a protein database using a translated nucleotide query
-- **tblastn**: search a translated nucleotide database using a protein query
-- **tblastx**: search a translated nucleotide database using a translated nucleotide query
-
-
-The basic usage of **blastn** is:
-{{< highlight bash >}}
-$ blastn -query input_reads.fasta -db input_reads_db -out blastn_output.alignments [options]
-{{< /highlight >}}
-where **input_reads.fasta** is an input file of sequence data in fasta format, **input_reads_db** is the generated BLAST database, and **blastn_output.alignments** is the output file where the alignments are stored.
-
-Additional parameters can be found in the [BLAST manual](https://www.ncbi.nlm.nih.gov/books/NBK279690/), or by typing:
-{{< highlight bash >}}
-$ blastn -help
-{{< /highlight >}}
-
-These BLAST alignment commands are multi-threaded, and therefore using the BLAST option **-num_threads <number_of_CPUs>** is recommended.
-
-
-HCC hosts multiple BLAST databases and indices on Crane. In order to use these resources, the ["biodata" module]({{<relref "/Applications/Application_Specific_Guides/bioinformatics_tools/biodata_module">}}) needs to be loaded first. The **$BLAST** variable contains the following currently available databases:
-
-- **16SMicrobial**
-- **env_nt**
-- **est**
-- **est_human**
-- **est_mouse**
-- **est_others**
-- **gss**
-- **human_genomic**
-- **human_genomic_transcript**
-- **mouse_genomic_transcript**
-- **nr**
-- **nt**
-- **other_genomic**
-- **refseq_genomic**
-- **refseq_rna**
-- **sts**
-- **swissprot**
-- **tsa_nr**
-- **tsa_nt**
-
-If you want to create and use a BLAST database that is not mentioned above, check [Create Local BLAST Database]({{<relref "create_local_blast_database" >}}).
-
-
-A basic SLURM example of a nucleotide BLAST run against the non-redundant **nt** BLAST database with 8 CPUs is provided below. When running a BLAST alignment, it is recommended to first copy the query and database files to the **/scratch/** directory of the worker node. Moreover, the BLAST output is also saved in this directory (**/scratch/blastn_output.alignments**). After BLAST finishes, the output file is copied from the worker node to your current work directory.
-{{% notice info %}}
-**Please note that the worker nodes cannot write to the */home/* directories, and therefore you need to run your job from your */work/* directory.**
-**This example will first copy your database to faster local storage called “scratch”.
-This can greatly improve performance!**
-{{% /notice %}}
-
-{{% panel header="`blastn_alignment.submit`"%}}
-{{< highlight bash >}}
-#!/bin/sh
-#SBATCH --job-name=BlastN
-#SBATCH --nodes=1
-#SBATCH --ntasks-per-node=8
-#SBATCH --time=168:00:00
-#SBATCH --mem=20gb
-#SBATCH --output=BlastN.%J.out
-#SBATCH --error=BlastN.%J.err
-
-module load blast/2.7
-module load biodata/1.0
-
-cd $WORK/<project_folder>
-cp $BLAST/nt.* /scratch/
-cp input_reads.fasta /scratch/
-
-blastn -query /scratch/input_reads.fasta -db /scratch/nt -out /scratch/blastn_output.alignments -num_threads $SLURM_NTASKS_PER_NODE
-
-cp /scratch/blastn_output.alignments $WORK/<project_folder>
-{{< /highlight >}}
-{{% /panel %}}
-
-
-One important BLAST parameter is the **e-value threshold**, which changes the number of hits returned by showing only those with an e-value lower than the given threshold. To show the hits with an **e-value** lower than 1e-10, modify the given script as follows:
-{{< highlight bash >}}
-$ blastn -query input_reads.fasta -db input_reads_db -out blastn_output.alignments -num_threads $SLURM_NTASKS_PER_NODE -evalue 1e-10
-{{< /highlight >}}
-
-
-The default BLAST output is in pairwise format. However, BLAST’s parameter **-outfmt** supports output in [different formats](https://www.ncbi.nlm.nih.gov/books/NBK279684/) that are easier to parse.
-
-
-A basic SLURM example of a protein BLAST run against the non-redundant **nr** BLAST database with tabular output format and 8 CPUs is shown below. As before, the query and database files are copied to the **/scratch/** directory. The BLAST output is also saved in this directory (**/scratch/blastx_output.alignments**). After BLAST finishes, the output file is copied from the worker node to your current work directory.
-{{% notice info %}}
-**Please note that the worker nodes cannot write to the */home/* directories, and therefore you need to run your job from your */work/* directory.**
-**This example will first copy your database to faster local storage called “scratch”.
This can greatly improve performance!** -{{% /notice %}} - -{{% panel header="`blastx_alignment.submit`"%}} -{{< highlight bash >}} -#!/bin/sh -#SBATCH --job-name=BlastX -#SBATCH --nodes=1 -#SBATCH --ntasks-per-node=8 -#SBATCH --time=168:00:00 -#SBATCH --mem=20gb -#SBATCH --output=BlastX.%J.out -#SBATCH --error=BlastX.%J.err - -module load blast/2.7 -module load biodata/1.0 - -cd $WORK/<project_folder> -cp $BLAST/nr.* /scratch/ -cp input_reads.fasta /scratch/ - -blastx -query /scratch/input_reads.fasta -db /scratch/nr -outfmt 6 -out /scratch/blastx_output.alignments -num_threads $SLURM_NTASKS_PER_NODE - -cp /scratch/blastx_output.alignments $WORK/<project_folder> -{{< /highlight >}} -{{% /panel %}} diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/biodata_module.md b/content/Applications/Application_Specific_Guides/bioinformatics_tools/biodata_module.md deleted file mode 100644 index 9b9ca690..00000000 --- a/content/Applications/Application_Specific_Guides/bioinformatics_tools/biodata_module.md +++ /dev/null @@ -1,88 +0,0 @@ -+++ -title = "Biodata Module" -description = "How to use Biodata Module on HCC machines" -scripts = ["https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/js/jquery.tablesorter.min.js", "https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/js/widgets/widget-pager.min.js","https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/js/widgets/widget-filter.min.js","/js/sort-table.js"] -css = ["http://mottie.github.io/tablesorter/css/theme.default.css","https://mottie.github.io/tablesorter/css/theme.dropbox.css", "https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/css/jquery.tablesorter.pager.min.css","https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/css/filter.formatter.min.css"] -weight = "52" -+++ - - -HCC hosts multiple databases (BLAST, KEGG, PANTHER, InterProScan), genome files, short read aligned indices etc. on Crane. -In order to use these resources, the "**biodata**" module needs to be loaded first. -For how to load module, please check [Module Commands]({{< relref "module_commands" >}}). - -Loading the "**biodata**" module will pre-set many environment variables, but most likely you will only need a subset of them. Environment variables can be used in your command or script by prefixing `$` to the name. 
- -The major environment variables are: -**$DATA** - main directory -**$BLAST** - Directory containing all available BLAST (nucleotide and protein) databases -**$KEGG** - KEGG database main entry point (requires license) -**$PANTHER** - PANTHER database main entry point (latest) -**$IPR** - InterProScan database main entry point (latest) -**$GENOMES** - Directory containing all available genomes (multiple sources, builds possible -**$INDICES** - Directory containing indices for bowtie, bowtie2, bwa for all available genomes -**$UNIPROT** - Directory containing latest release of full UniProt database - - -In order to check what genomes are available, you can type: -{{< highlight bash >}} -$ ls $GENOMES -{{< /highlight >}} - - -In order to check what BLAST databases are available, you can just type: -{{< highlight bash >}} -$ ls $BLAST -{{< /highlight >}} - - -An example of how to run Bowtie2 local alignment on Crane utilizing the default Horse, *Equus caballus* index (*BOWTIE2\_HORSE*) with paired-end fasta files and 8 CPUs is shown below: -{{% panel header="`bowtie2_alignment.submit`"%}} -{{< highlight bash >}} -#!/bin/sh -#SBATCH --job-name=Bowtie2 -#SBATCH --nodes=1 -#SBATCH --ntasks-per-node=8 -#SBATCH --time=168:00:00 -#SBATCH --mem=10gb -#SBATCH --output=Bowtie2.%J.out -#SBATCH --error=Bowtie2.%J.err - -module load bowtie/2.2 -module load biodata - -bowtie2 -x $BOWTIE2_HORSE -f -1 input_reads_pair_1.fasta -2 input_reads_pair_2.fasta -S bowtie2_alignments.sam --local -p $SLURM_NTASKS_PER_NODE - -{{< /highlight >}} -{{% /panel %}} - - -An example of BLAST run against the non-redundant nucleotide database available on Crane is provided below: -{{% panel header="`blastn_alignment.submit`"%}} -{{< highlight bash >}} -#!/bin/sh -#SBATCH --job-name=BlastN -#SBATCH --nodes=1 -#SBATCH --ntasks-per-node=8 -#SBATCH --time=168:00:00 -#SBATCH --mem=10gb -#SBATCH --output=BlastN.%J.out -#SBATCH --error=BlastN.%J.err - -module load blast/2.7 -module load biodata -cp $BLAST/nt.* /scratch -cp input_reads.fasta /scratch - -blastn -db /scratch/nt -query /scratch/input_reads.fasta -out /scratch/blast_nucleotide.results -cp /scratch/blast_nucleotide.results . - -{{< /highlight >}} -{{% /panel %}} - - -### Available Organisms - -The organisms and their appropriate environmental variables for all genomes and chromosome files, as well as indices are shown in the table below. - -{{< table url="http://rhino-head.unl.edu:8192/bio/data/json" >}} diff --git a/content/Applications/Application_Specific_Guides/dmtcp_checkpointing.md b/content/Applications/Application_Specific_Guides/dmtcp_checkpointing.md deleted file mode 100644 index 66985673..00000000 --- a/content/Applications/Application_Specific_Guides/dmtcp_checkpointing.md +++ /dev/null @@ -1,138 +0,0 @@ -+++ -title = "DMTCP Checkpointing" -description = "How to use the DMTCP utility to checkpoint your application." -+++ - -[DMTCP](http://dmtcp.sourceforge.net) -(Distributed MultiThreaded Checkpointing) is a checkpointing package for -applications. Using checkpointing allows resuming of a failing -simulation due to failing resources (e.g. hardware, software, exceeded -time and memory resources). - -DMTCP supports both sequential and multi-threaded applications. Some -examples of binary programs on Linux distributions that can be used with -DMTCP are OpenMP, MATLAB, Python, Perl, MySQL, bash, gdb, X-Windows etc. - -DMTCP provides support for several resource managers, including SLURM, -the resource manager used in HCC. 
-The DMTCP module is available on Crane and is enabled by typing:
-
-{{< highlight bash >}}
-module load dmtcp
-{{< /highlight >}}
-
-After the module is loaded, the first step is to run the command:
-
-{{< highlight bash >}}
-[<username>@login.crane ~]$ dmtcp_launch --new-coordinator --rm --interval <interval_time_seconds> <your_command>
-{{< /highlight >}}
-
-where the `--rm` option enables SLURM support,
-**\<interval_time_seconds\>** is the time in seconds between
-automatic checkpoints, and **\<your_command\>** is the actual
-command you want to run and checkpoint.
-
-Besides the general options shown above, more `dmtcp_launch` options
-can be seen by using:
-
-{{< highlight bash >}}
-[<username>@login.crane ~]$ dmtcp_launch --help
-{{< /highlight >}}
-
-`dmtcp_launch` creates a few files that are used to resume the
-cancelled job, such as *ckpt\_\*.dmtcp* and
-*dmtcp\_restart\_script\*.sh*. Unless otherwise stated
-(using the `--ckptdir` option), these files are stored in the current
-working directory.
-
-
-The second step of DMTCP is to restart the cancelled job, and there are
-two ways of doing that:
-
-- `dmtcp_restart ckpt_*.dmtcp` *\<options\>* (before running
-  this command, delete any old *ckpt\_\*.dmtcp* files in your current
-  directory)
-
-- `./dmtcp_restart_script.sh` *\<options\>*
-
-If there are no options defined in the *<options>* field, DMTCP
-will keep running with the options defined in the initial
-**dmtcp\_launch** call (such as interval time, output directory, etc.).
-
-
-A simple example of using DMTCP with
-[BLAST]({{< relref "/Applications/Application_Specific_Guides/bioinformatics_tools/alignment_tools/blast/running_blast_alignment" >}})
-on Crane is shown below:
-
-{{% panel theme="info" header="dmtcp_blastx.submit" %}}
-{{< highlight batch >}}
-#!/bin/sh
-#SBATCH --job-name=BlastX
-#SBATCH --nodes=1
-#SBATCH --ntasks-per-node=8
-#SBATCH --time=50:00:00
-#SBATCH --mem=20gb
-#SBATCH --output=BlastX_info_1.txt
-#SBATCH --error=BlastX_error_1.txt
-
-module load dmtcp
-module load blast/2.4
-
-cd $WORK/<project_folder>
-cp -r /work/HCC/DATA/blastdb/nr/ /tmp/
-cp input_reads.fasta /tmp/
-
-dmtcp_launch --new-coordinator --rm --interval 3600 blastx -query \
-/tmp/input_reads.fasta -db /tmp/nr/nr -out blastx_output.alignments \
--num_threads $SLURM_NTASKS_PER_NODE
-{{< /highlight >}}
-{{% /panel %}}
-
-In this example, DMTCP takes checkpoints every hour (`--interval 3600`),
-and the actual command we want to checkpoint is `blastx` with
-some general BLAST options defined with `-query`, `-db`, `-out`,
-`-num_threads`.
-
-If this job is killed for any reason, it can be restarted using the
-following submit file:
-
-{{% panel theme="info" header="dmtcp_restart_blastx.submit" %}}
-{{< highlight batch >}}
-#!/bin/sh
-#SBATCH --job-name=BlastX
-#SBATCH --nodes=1
-#SBATCH --ntasks-per-node=8
-#SBATCH --time=50:00:00
-#SBATCH --mem=20gb
-#SBATCH --output=BlastX_info_2.txt
-#SBATCH --error=BlastX_error_2.txt
-
-module load dmtcp
-module load blast/2.4
-
-cd $WORK/<project_folder>
-cp -r /work/HCC/DATA/blastdb/nr/ /tmp/
-cp input_reads.fasta /tmp/
-
-# Start DMTCP
-dmtcp_coordinator --daemon --port 0 --port-file /tmp/port
-export DMTCP_COORD_HOST=`hostname`
-export DMTCP_COORD_PORT=$(</tmp/port)
-
-# Restart job
-./dmtcp_restart_script.sh
-{{< /highlight >}}
-{{% /panel %}}
-
-{{% notice info %}}
-`dmtcp_restart` generates new
-`ckpt_*.dmtcp` and `dmtcp_restart_script*.sh` files.
Therefore, if -the restarted job is also killed due to unavailable/exceeded resources, -you can resubmit the same job again without any changes in the submit -file shown above (just don't forget to delete the old `ckpt_*.dmtcp` -files if you are using these files instead of `dmtcp_restart_script.sh`) -{{% /notice %}} - -Even though DMTCP tries to support most mainstream and commonly used -applications, there is no guarantee that every application can be -checkpointed and restarted. diff --git a/content/Applications/Application_Specific_Guides/fortran_c_on_hcc.md b/content/Applications/Application_Specific_Guides/fortran_c_on_hcc.md deleted file mode 100644 index 5d5eab9c..00000000 --- a/content/Applications/Application_Specific_Guides/fortran_c_on_hcc.md +++ /dev/null @@ -1,219 +0,0 @@ -+++ -title = "Fortran/C on HCC" -description = "How to compile and run Fortran/C program on HCC machines" -weight = "50" -+++ - -This quick start demonstrates how to implement a Fortran/C program on -HCC supercomputers. The sample codes and submit scripts can be -downloaded from [serial_dir.zip](/attachments/serial_dir.zip). - -#### Login to a HCC Cluster - -Log in to a HCC cluster through PuTTY ([For Windows Users]({{< relref "/Quickstarts/connecting/for_windows_users">}})) or Terminal ([For Mac/Linux -Users]({{< relref "/Quickstarts/connecting/for_maclinux_users">}})) and make a subdirectory called `serial_dir` under the `$WORK` directory. - -{{< highlight bash >}} -$ cd $WORK -$ mkdir serial_dir -{{< /highlight >}} - -In the subdirectory `serial_dir`, save all the relevant Fortran/C codes. Here we include two demo -programs, `demo_f_serial.f90` and `demo_c_serial.c`, that compute the sum from 1 to 20. - -{{%expand "demo_f_serial.f90" %}} -{{< highlight bash >}} -Program demo_f_serial - implicit none - integer, parameter :: N = 20 - real*8 w - integer i - common/sol/ x - real*8 x - real*8, dimension(N) :: y - do i = 1,N - w = i*1d0 - call proc(w) - y(i) = x - write(6,*) 'i,x = ', i, y(i) - enddo - write(6,*) 'sum(y) =',sum(y) -Stop -End Program -Subroutine proc(w) - real*8, intent(in) :: w - common/sol/ x - real*8 x - x = w -Return -End Subroutine -{{< /highlight >}} -{{% /expand %}} - - -{{%expand "demo_c_serial.c" %}} -{{< highlight c >}} -//demo_c_serial -#include <stdio.h> - -double proc(double w){ - double x; - x = w; - return x; -} - -int main(int argc, char* argv[]){ - int N=20; - double w; - int i; - double x; - double y[N]; - double sum; - for (i = 1; i <= N; i++){ - w = i*1e0; - x = proc(w); - y[i-1] = x; - printf("i,x= %d %lf\n", i, y[i-1]) ; - } - - sum = 0e0; - for (i = 1; i<= N; i++){ - sum = sum + y[i-1]; - } - - printf("sum(y)= %lf\n", sum); - -return 0; -} -{{< /highlight >}} -{{% /expand %}} - ---- - -#### Compiling the Code - -The compiling of a Fortran/C++ code to executable is usually done behind -the scene in a Graphical User Interface (GUI) environment, such as -Microsoft Visual Studio. In a HCC cluster, the compiling is done -explicitly by first loading a choice compiler and then executing the -corresponding compiling command. Here we will use the GNU Complier -Collection, `gcc`, for demonstration. Other available compilers such as -`intel` or `pgi` can be looked up using the command -line `module avail`. Before compiling the code, make sure there is no -dependency on any numerical library in the code. 
If invoking a numerical library is necessary, contact an HCC specialist
-({{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu)) to
-discuss implementation options.
-
-{{< highlight bash >}}
-$ module load compiler/gcc/8.2
-$ gfortran demo_f_serial.f90 -o demo_f_serial.x
-$ gcc demo_c_serial.c -o demo_c_serial.x
-{{< /highlight >}}
-
-The above commands load the `gcc` compiler and use the compiling
-commands `gfortran` and `gcc` to compile the codes into `.x` files
-(executables).
-
-#### Creating a Submit Script
-
-Create a submit script to request one core (default) and a 1-minute run
-time on the supercomputer. The name of the executable goes on the last
-line.
-
-{{% panel header="`submit_f.serial`"%}}
-{{< highlight bash >}}
-#!/bin/sh
-#SBATCH --mem-per-cpu=1024
-#SBATCH --time=00:01:00
-#SBATCH --job-name=Fortran
-#SBATCH --error=Fortran.%J.err
-#SBATCH --output=Fortran.%J.out
-
-module load compiler/gcc/8.2
-./demo_f_serial.x
-{{< /highlight >}}
-{{% /panel %}}
-
-{{% panel header="`submit_c.serial`"%}}
-{{< highlight bash >}}
-#!/bin/sh
-#SBATCH --mem-per-cpu=1024
-#SBATCH --time=00:01:00
-#SBATCH --job-name=C
-#SBATCH --error=C.%J.err
-#SBATCH --output=C.%J.out
-
-module load compiler/gcc/8.2
-./demo_c_serial.x
-{{< /highlight >}}
-{{% /panel %}}
-
-#### Submit the Job
-
-The job can be submitted through the command `sbatch`. The job status
-can be monitored by entering `squeue` with the `-u` option.
-
-{{< highlight bash >}}
-$ sbatch submit_f.serial
-$ sbatch submit_c.serial
-$ squeue -u <username>
-{{< /highlight >}}
-
-Replace `<username>` with your HCC username.
-
-#### Sample Output
-
-The sum from 1 to 20 is computed and printed to the `.out` file (see
-below).
-{{%expand "Fortran.out" %}}
-{{< highlight batchfile>}}
- i,x = 1 1.0000000000000000
- i,x = 2 2.0000000000000000
- i,x = 3 3.0000000000000000
- i,x = 4 4.0000000000000000
- i,x = 5 5.0000000000000000
- i,x = 6 6.0000000000000000
- i,x = 7 7.0000000000000000
- i,x = 8 8.0000000000000000
- i,x = 9 9.0000000000000000
- i,x = 10 10.000000000000000
- i,x = 11 11.000000000000000
- i,x = 12 12.000000000000000
- i,x = 13 13.000000000000000
- i,x = 14 14.000000000000000
- i,x = 15 15.000000000000000
- i,x = 16 16.000000000000000
- i,x = 17 17.000000000000000
- i,x = 18 18.000000000000000
- i,x = 19 19.000000000000000
- i,x = 20 20.000000000000000
- sum(y) = 210.00000000000000
-{{< /highlight >}}
-{{% /expand %}}
-
-{{%expand "C.out" %}}
-{{< highlight batchfile>}}
-i,x= 1 1.000000
-i,x= 2 2.000000
-i,x= 3 3.000000
-i,x= 4 4.000000
-i,x= 5 5.000000
-i,x= 6 6.000000
-i,x= 7 7.000000
-i,x= 8 8.000000
-i,x= 9 9.000000
-i,x= 10 10.000000
-i,x= 11 11.000000
-i,x= 12 12.000000
-i,x= 13 13.000000
-i,x= 14 14.000000
-i,x= 15 15.000000
-i,x= 16 16.000000
-i,x= 17 17.000000
-i,x= 18 18.000000
-i,x= 19 19.000000
-i,x= 20 20.000000
-sum(y)= 210.000000
-{{< /highlight >}}
-{{% /expand %}}
diff --git a/content/Applications/Application_Specific_Guides/mpi_jobs_on_hcc.md b/content/Applications/Application_Specific_Guides/mpi_jobs_on_hcc.md
deleted file mode 100644
index 399c7f0f..00000000
--- a/content/Applications/Application_Specific_Guides/mpi_jobs_on_hcc.md
+++ /dev/null
@@ -1,322 +0,0 @@
-+++
-title = "MPI Jobs on HCC"
-description = "How to compile and run MPI programs on HCC machines"
-weight = "52"
-+++
-
-This quick start demonstrates how to implement a parallel (MPI)
-Fortran/C program on HCC supercomputers. The sample codes and submit
-scripts can be downloaded from [mpi_dir.zip](/attachments/mpi_dir.zip).
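-
-Before the step-by-step walkthrough, here is the overall shape of the
-workflow as a condensed sketch (each command is explained in the
-sections below; replace `<username>` with your HCC username):
-
-{{< highlight bash >}}
-$ cd $WORK && mkdir mpi_dir && cd mpi_dir     # workspace for the example
-$ module load compiler/gcc/6.1 openmpi/2.1    # compiler plus MPI wrapper
-$ mpicc demo_c_mpi.c -o demo_c_mpi.x          # compile to an executable
-$ sbatch submit_c.mpi                         # submit the batch job
-$ squeue -u <username>                        # monitor its status
-{{< /highlight >}}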
- -#### Login to a HCC Cluster - -Log in to a HCC cluster through PuTTY ([For Windows Users]({{< relref "/Quickstarts/connecting/for_windows_users">}})) or Terminal ([For Mac/Linux -Users]({{< relref "/Quickstarts/connecting/for_maclinux_users">}})) and make a subdirectory called `mpi_dir` under the `$WORK` directory. - -{{< highlight bash >}} -$ cd $WORK -$ mkdir mpi_dir -{{< /highlight >}} - -In the subdirectory `mpi_dir`, save all the relevant codes. Here we -include two demo programs, `demo_f_mpi.f90` and `demo_c_mpi.c`, that -compute the sum from 1 to 20 through parallel processes. A -straightforward parallelization scheme is used for demonstration -purpose. First, the master core (i.e. `myid=0`) distributes equal -computation workload to a certain number of cores (as specified by -`--ntasks `in the submit script). Then, each worker core computes a -partial summation as output. Finally, the master core collects the -outputs from all worker cores and perform an overall summation. For easy -comparison with the serial code ([Fortran/C on HCC]({{< relref "fortran_c_on_hcc">}})), the -added lines in the parallel code (MPI) are marked with "!=" or "//=". - -{{%expand "demo_f_mpi.f90" %}} -{{< highlight fortran >}} -Program demo_f_mpi -!====== MPI ===== - use mpi -!================ - implicit none - integer, parameter :: N = 20 - real*8 w - integer i - common/sol/ x - real*8 x - real*8, dimension(N) :: y -!============================== MPI ================================= - integer ind - real*8, dimension(:), allocatable :: y_local - integer numnodes,myid,rc,ierr,start_local,end_local,N_local - real*8 allsum -!==================================================================== - -!============================== MPI ================================= - call mpi_init( ierr ) - call mpi_comm_rank ( mpi_comm_world, myid, ierr ) - call mpi_comm_size ( mpi_comm_world, numnodes, ierr ) - ! - N_local = N/numnodes - allocate ( y_local(N_local) ) - start_local = N_local*myid + 1 - end_local = N_local*myid + N_local -!==================================================================== - do i = start_local, end_local - w = i*1d0 - call proc(w) - ind = i - N_local*myid - y_local(ind) = x -! y(i) = x -! write(6,*) 'i, y(i)', i, y(i) - enddo -! write(6,*) 'sum(y) =',sum(y) -!============================================== MPI ===================================================== - call mpi_reduce( sum(y_local), allsum, 1, mpi_real8, mpi_sum, 0, mpi_comm_world, ierr ) - call mpi_gather ( y_local, N_local, mpi_real8, y, N_local, mpi_real8, 0, mpi_comm_world, ierr ) - - if (myid == 0) then - write(6,*) '-----------------------------------------' - write(6,*) '*Final output from... myid=', myid - write(6,*) 'numnodes =', numnodes - write(6,*) 'mpi_sum =', allsum - write(6,*) 'y=...' 
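-! Note: y was assembled by mpi_gather above; this decomposition assumes
-! N is evenly divisible by numnodes (e.g. N = 20 with --ntasks=5).
-! If it is not, the trailing N - N_local*numnodes elements are never
-! computed, so both the gathered y and the reduced sum omit them.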
- do i = 1, N - write(6,*) y(i) - enddo - write(6,*) 'sum(y)=', sum(y) - endif - - deallocate( y_local ) - call mpi_finalize(rc) -!======================================================================================================== - -Stop -End Program -Subroutine proc(w) - real*8, intent(in) :: w - common/sol/ x - real*8 x - - x = w - -Return -End Subroutine -{{< /highlight >}} -{{% /expand %}} - -{{%expand "demo_c_mpi.c" %}} -{{< highlight c >}} -//demo_c_mpi -#include <stdio.h> -//======= MPI ======== -#include "mpi.h" -#include <stdlib.h> -//==================== - -double proc(double w){ - double x; - x = w; - return x; -} - -int main(int argc, char* argv[]){ - int N=20; - double w; - int i; - double x; - double y[N]; - double sum; -//=============================== MPI ============================ - int ind; - double *y_local; - int numnodes,myid,rc,ierr,start_local,end_local,N_local; - double allsum; -//================================================================ -//=============================== MPI ============================ - MPI_Init(&argc, &argv); - MPI_Comm_rank( MPI_COMM_WORLD, &myid ); - MPI_Comm_size ( MPI_COMM_WORLD, &numnodes ); - N_local = N/numnodes; - y_local=(double *) malloc(N_local*sizeof(double)); - start_local = N_local*myid + 1; - end_local = N_local*myid + N_local; -//================================================================ - - for (i = start_local; i <= end_local; i++){ - w = i*1e0; - x = proc(w); - ind = i - N_local*myid; - y_local[ind-1] = x; -// y[i-1] = x; -// printf("i,x= %d %lf\n", i, y[i-1]) ; - } - sum = 0e0; - for (i = 1; i<= N_local; i++){ - sum = sum + y_local[i-1]; - } -// printf("sum(y)= %lf\n", sum); -//====================================== MPI =========================================== - MPI_Reduce( &sum, &allsum, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD ); - MPI_Gather( &y_local[0], N_local, MPI_DOUBLE, &y[0], N_local, MPI_DOUBLE, 0, MPI_COMM_WORLD ); - - if (myid == 0){ - printf("-----------------------------------\n"); - printf("*Final output from... myid= %d\n", myid); - printf("numnodes = %d\n", numnodes); - printf("mpi_sum = %lf\n", allsum); - printf("y=...\n"); - for (i = 1; i <= N; i++){ - printf("%lf\n", y[i-1]); - } - sum = 0e0; - for (i = 1; i<= N; i++){ - sum = sum + y[i-1]; - } - - printf("sum(y) = %lf\n", sum); - - } - - free( y_local ); - MPI_Finalize (); -//====================================================================================== - -return 0; -} -{{< /highlight >}} -{{% /expand %}} - ---- - -#### Compiling the Code - -The compiling of a MPI code requires first loading a compiler "engine" -such as `gcc`, `intel`, or `pgi` and then loading a MPI wrapper -`openmpi`. Here we will use the GNU Complier Collection, `gcc`, for -demonstration. - -{{< highlight bash >}} -$ module load compiler/gcc/6.1 openmpi/2.1 -$ mpif90 demo_f_mpi.f90 -o demo_f_mpi.x -$ mpicc demo_c_mpi.c -o demo_c_mpi.x -{{< /highlight >}} - -The above commends load the `gcc` complier with the `openmpi` wrapper. -The compiling commands `mpif90` or `mpicc` are used to compile the codes -to`.x` files (executables). - -### Creating a Submit Script - -Create a submit script to request 5 cores (with `--ntasks`). A parallel -execution command `mpirun ./` needs to enter to last line before the -main program name. 
- -{{% panel header="`submit_f.mpi`"%}} -{{< highlight bash >}} -#!/bin/sh -#SBATCH --ntasks=5 -#SBATCH --mem-per-cpu=1024 -#SBATCH --time=00:01:00 -#SBATCH --job-name=Fortran -#SBATCH --error=Fortran.%J.err -#SBATCH --output=Fortran.%J.out - -mpirun ./demo_f_mpi.x -{{< /highlight >}} -{{% /panel %}} - -{{% panel header="`submit_c.mpi`"%}} -{{< highlight bash >}} -#!/bin/sh -#SBATCH --ntasks=5 -#SBATCH --mem-per-cpu=1024 -#SBATCH --time=00:01:00 -#SBATCH --job-name=C -#SBATCH --error=C.%J.err -#SBATCH --output=C.%J.out - -mpirun ./demo_c_mpi.x -{{< /highlight >}} -{{% /panel %}} - -#### Submit the Job - -The job can be submitted through the command `sbatch`. The job status -can be monitored by entering `squeue` with the `-u` option. - -{{< highlight bash >}} -$ sbatch submit_f.mpi -$ sbatch submit_c.mpi -$ squeue -u <username> -{{< /highlight >}} - -Replace `<username>` with your HCC username. - -Sample Output -------------- - -The sum from 1 to 20 is computed and printed to the `.out` file (see -below). The outputs from the 5 cores are collected and processed by the -master core (i.e. `myid=0`). - -{{%expand "Fortran.out" %}} -{{< highlight batchfile>}} - ----------------------------------------- - *Final output from... myid= 0 - numnodes = 5 - mpi_sum = 210.00000000000000 - y=... - 1.0000000000000000 - 2.0000000000000000 - 3.0000000000000000 - 4.0000000000000000 - 5.0000000000000000 - 6.0000000000000000 - 7.0000000000000000 - 8.0000000000000000 - 9.0000000000000000 - 10.000000000000000 - 11.000000000000000 - 12.000000000000000 - 13.000000000000000 - 14.000000000000000 - 15.000000000000000 - 16.000000000000000 - 17.000000000000000 - 18.000000000000000 - 19.000000000000000 - 20.000000000000000 - sum(y)= 210.00000000000000 -{{< /highlight >}} -{{% /expand %}} - -{{%expand "C.out" %}} -{{< highlight batchfile>}} ------------------------------------ -*Final output from... myid= 0 -numnodes = 5 -mpi_sum = 210.000000 -y=... -1.000000 -2.000000 -3.000000 -4.000000 -5.000000 -6.000000 -7.000000 -8.000000 -9.000000 -10.000000 -11.000000 -12.000000 -13.000000 -14.000000 -15.000000 -16.000000 -17.000000 -18.000000 -19.000000 -20.000000 -sum(y) = 210.000000 -{{< /highlight >}} -{{% /expand %}} - diff --git a/content/Applications/Using_Your_Own_Software/using_anaconda_package_manager.md b/content/Applications/Using_Your_Own_Software/using_anaconda_package_manager.md deleted file mode 100644 index b1a70522..00000000 --- a/content/Applications/Using_Your_Own_Software/using_anaconda_package_manager.md +++ /dev/null @@ -1,305 +0,0 @@ -+++ -title = "Using Anaconda Package Manager" -description = "How to use the Anaconda Package Manager on HCC resources." -+++ - -[Anaconda](https://www.anaconda.com/what-is-anaconda), -from [Anaconda, Inc](https://www.anaconda.com) -is a completely free enterprise-ready distribution for large-scale data -processing, predictive analytics, and scientific computing. It includes -over 195 of the most popular Python packages for science, math, -engineering, and data analysis. **It also offers the ability to easily -create custom _environments_ by mixing and matching different versions -of Python and/or R and other packages into isolated environments that -individual users are free to create.** Anaconda includes the `conda` -package and environment manager to make managing these environments -straightforward. 
- -- [Using Anaconda](#using-anaconda) -- [Creating custom Anaconda Environment](#creating-custom-anaconda-environment) -- [Creating custom GPU Anaconda Environment](#creating-custom-gpu-anaconda-environment) -- [Adding Packages to an Existing Environment](#adding-packages-to-an-existing-environment) -- [Using an Anaconda Environment in a Jupyter Notebook on Crane](#using-an-anaconda-environment-in-a-jupyter-notebook-on-crane) - -### Using Anaconda - -While the standard methods of installing packages via `pip` -and `easy_install` work with Anaconda, the preferred method is using -the `conda` command. - -{{% notice info %}} -Full documentation on using Conda is available -at http://conda.pydata.org/docs/ - -A [cheatsheet](/attachments/11635089.pdf) is also provided. -{{% /notice %}} - -A few examples of the basic commands are provided here. For a full -explanation of all of Anaconda/Conda's capabilities, see the -documentation linked above. - -Anaconda is provided through the `anaconda` module on HCC machines. To -begin using it, load the Anaconda module. - -{{% panel theme="info" header="Load the Anaconda module to start using Conda" %}} -{{< highlight bash >}} -module load anaconda -{{< /highlight >}} -{{% /panel %}} - -To display general information about Conda/Anaconda, use the `info` subcommand. - -{{% panel theme="info" header="Display general information about Conda/Anaconda" %}} -{{< highlight bash >}} -conda info -{{< /highlight >}} -{{% /panel %}} - -Conda allows the easy creation of isolated, custom environments with -packages and versions of your choosing. To show all currently available -environments, and which is active, use the `info `subcommand with the -`-e` option. - -{{% panel theme="info" header="List available environments" %}} -{{< highlight bash >}} -conda info -e -{{< /highlight >}} -{{% /panel %}} - -The active environment will be marked with an asterisk (\*) character. - -The `list` command will show all packages installed -in the currently active environment. - -{{% panel theme="info" header="List installed packages in current environment" %}} -{{< highlight bash >}} -conda list -{{< /highlight >}} -{{% /panel %}} - -To find the names of packages, use the `search` subcommand. - -{{% panel theme="info" header="Search for packages" %}} -{{< highlight bash >}} -conda search numpy -{{< /highlight >}} -{{% /panel %}} - -If the package is available, this will also display available package -versions and compatible Python versions the package may be installed -under. - -### Creating Custom Anaconda Environments - -The `create` command is used to create a new environment. It requires -at a minimum a name for the environment, and at least one package to -install. For example, suppose we wish to create a new environment, and -need version 1.8 of NumPy. - -{{% notice info %}} -The `conda create` command must be run on the login node. -{{% /notice %}} - -{{% panel theme="info" header="Create a new environment by providing a name and package specification" %}} -{{< highlight bash >}} -conda create -n mynumpy numpy=1.8 -{{< /highlight >}} -{{% /panel %}} - -This will create a new environment called 'mynumpy' and installed NumPy -version 1.8, along with any required dependencies. - -To use the environment, we must first *activate* it. - -{{% panel theme="info" header="Activate environment" %}} -{{< highlight bash >}} -conda activate mynumpy -{{< /highlight >}} -{{% /panel %}} - -Our new environment is now active, and we can use it. 
The shell prompt -will change to indicate this as well (this can be disable if desired). - -To use your environment in a job, add the lines - -{{% panel theme="info" header="Use your environment in a SLURM job" %}} -{{< highlight bash >}} -module load anaconda -conda activate mynumpy -{{< /highlight >}} -{{% /panel %}} - -to the beginning of your submit script (replacing `mynumpy` with your environment's name). - -### Creating Custom GPU Anaconda Environments - -We provide GPU versions of various frameworks such as `tensorflow`, `keras`, `theano`, via [modules](../module_commands). However, sometimes you may need additional libraries or packages that are not available as part of these modules. In this case, you will need to create your own GPU Anaconda environment. - -To do this, you need to first clone one of our GPU modules to a new Anaconda environment, and then install the desired packages in this new environment. - -The reason for this is that the GPU modules we support are built using the specific CUDA drivers our GPU nodes have. If you just create custom GPU environment without cloning the module, your code will not utilize the GPUs. - - -For example, if you want to use `tensorflow` with additional packages, first do: -{{% panel theme="info" header="Cloning GPU module to a new Anaconda environment" %}} -{{< highlight bash >}} -module load tensorflow-gpu/py36/1.12 anaconda -conda create -n tensorflow-gpu-1.12-custom --clone $CONDA_DEFAULT_ENV -module purge -{{< /highlight >}} -{{% /panel %}} - -This will create a new `tensorflow-gpu-1.12-custom` environment in your home directory that is a copy of the `tensorflow-gpu` module. Then, you can install the additional packages you need in this environment. -{{% panel theme="info" header="Install new packages in the currently active environment" %}} -{{< highlight bash >}} -module load anaconda -conda activate tensorflow-gpu-1.12-custom -conda install <packages> -{{< /highlight >}} -{{% /panel %}} - -Next, whenever you want to use this custom GPU Anaconda environment, you need to add these two lines in your submit script: -{{< highlight bash >}} -module load anaconda -conda activate tensorflow-gpu-1.12-custom -{{< /highlight >}} - -{{% notice info %}} -If you have custom GPU Anaconda environment please only use the two lines from above and **DO NOT** load the module you have cloned earlier. Using `module load tensorflow-gpu/py36/1.12` and `conda activate tensorflow-gpu-1.12-custom` in the same script is **wrong** and may give you various errors and incorrect results. -{{% /notice %}} - - -### Adding Packages to an Existing Environment - -To install additional packages in an environment, use the `install` -subcommand. Suppose we want to install iPython in our 'mynumpy' -environment. While the environment is active, use `install `with no -additional arguments. - -{{% panel theme="info" header="Install a new package in the currently active environment" %}} -{{< highlight bash >}} -conda install ipython -{{< /highlight >}} -{{% /panel %}} - -{{% notice info %}} -The `conda install` command must be run on the login node. -{{% /notice %}} - -If you aren't currently in the environment you wish to install the -package in, add the `-n `option to specify the name. - -{{% panel theme="info" header="Install new packages in a specified environment" %}} -{{< highlight bash >}} -conda install -n mynumpy ipython -{{< /highlight >}} -{{% /panel %}} - -The `remove` subcommand to uninstall a package functions similarly. 
- -{{% panel theme="info" header="Remove package from currently active environment" %}} -{{< highlight bash >}} -conda remove ipython -{{< /highlight >}} -{{% /panel %}} - -{{% panel theme="info" header="Remove package from environment specified by name" %}} -{{< highlight bash >}} -conda remove -n mynumpy ipython -{{< /highlight >}} -{{% /panel %}} - -To exit an environment, we *deactivate* it. - -{{% panel theme="info" header="Exit current environment" %}} -Newer versions of anaconda: -{{< highlight bash >}} -conda deactivate -{{< /highlight >}} -Older versions of anaconda: -{{< highlight bash >}} -source deactivate -{{< /highlight >}} -{{% /panel %}} - -Finally, to completely remove an environment, add the `--all `option -to `remove`. - -{{% panel theme="info" header="Completely remove an environment" %}} -{{< highlight bash >}} -conda remove -n mynumpy --all -{{< /highlight >}} -{{% /panel %}} - -### Using an Anaconda Environment in a Jupyter Notebook on Crane - -It is not difficult to make an Anaconda environment available to a -Jupyter Notebook. To do so, follow the steps below, replacing -`myenv` with the name of the Python or R environment you wish to use: - -1. Stop any running Jupyter Notebooks and ensure you are logged out of - the JupyterHub instance at https://crane.unl.edu - 1. If you are not logged out, please click the Control Panel button - located in the top right corner. - 2. Click the "Stop My Server" Button to terminate the Jupyter - server. - 3. Click the logout button in the top right corner. - -2. Using the command-line environment, load the target conda - environment: - {{< highlight bash >}}conda activate myenv{{< /highlight >}} - -3. Install the Jupyter kernel and add the environment: - - 1. For a **Python** conda environment, install the IPykernel - package, and then the kernel specification: - - {{< highlight bash >}} - # Install ipykernel - conda install ipykernel - - # Install the kernel specification - python -m ipykernel install --user --name "$CONDA_DEFAULT_ENV" --display-name "Python ($CONDA_DEFAULT_ENV)" - {{< /highlight >}} - - 2. For an **R** conda environment, install the jupyter\_client and - IRkernel packages, and then the kernel specification: - - {{< highlight bash >}} - # Install PNG support for R, the R kernel for Jupyter, and the Jupyter client - conda install r-png - conda install r-irkernel jupyter_client - - # Install jupyter_client 5.2.3 from anaconda channel for bug workaround - conda install -c anaconda jupyter_client - - # Install the kernel specification - R -e "IRkernel::installspec(name = '$CONDA_DEFAULT_ENV', displayname = 'R ($CONDA_DEFAULT_ENV)', user = TRUE)" - {{< /highlight >}} - -4. Once you have the environment set up, deactivate it: - {{< highlight bash >}}conda deactivate{{< /highlight >}} - -5. To make your conda environments accessible from the worker nodes, - enter the following commands: - - {{< highlight bash >}} - mkdir -p $WORK/.jupyter - mv ~/.local/share/jupyter/kernels $WORK/.jupyter - ln -s $WORK/.jupyter/kernels ~/.local/share/jupyter/kernels - {{< /highlight >}} - - {{% notice note %}} -**Note**: Step 5 only needs to be done once. Any future created -environments will automatically be accessible from SLURM notebooks -once this is done. - -**Note**: For older version of anaconda, use `source deactivate` to -deactivate the environment. -{{% /notice %}} - -6. 
Log in to JupyterHub at https://crane.unl.edu
-   and create a new notebook using the environment by selecting the
-   correct entry in the `New` dropdown menu in the top right
-   corner.
-   {{< figure src="/images/24151931.png" height="400" class="img-border">}}
diff --git a/content/Connecting/linux/linux_file_permissions.md b/content/Connecting/linux/linux_file_permissions.md
deleted file mode 100644
index 9a3d7099..00000000
--- a/content/Connecting/linux/linux_file_permissions.md
+++ /dev/null
@@ -1,48 +0,0 @@
-+++
-title = "Linux File Permissions"
-description = "How to view and change file permissions with Linux commands"
-weight = 20
-+++
-
-- [Opening a Terminal Window](#opening-a-terminal-window)
-- [Listing File Permissions](#listing-file-permissions)
-- [Changing File Permissions](#changing-file-permissions)
-
-## Opening a Terminal Window
-
-Use your local terminal to connect to a cluster, or open a new terminal window on [Crane](https://crane.unl.edu).
-
-Click [here](https://hcc.unl.edu/docs/Quickstarts/connecting/) if you need help connecting to a cluster
-with a local terminal.
-
-Click [here](https://hcc.unl.edu/docs/guides/running_applications/jupyter/) if you need
-help opening a new terminal window within JupyterHub.
-
-## Listing File Permissions
-
-Type the command `ls -l` to list the files and directories with file permissions for your current location.
-
-{{< figure src="/images/LinuxList.png" >}}
-
-The first character denotes whether an item is a file or a directory. If 'd' is shown, it's a directory, and if '-' is shown, it's a file.
-Following the first character, you will see some combination of r, w, x, and -. The first rwx triplet shows the 'read', 'write', and
-'execute' permissions for the creator of that file or directory. A '-' means a particular permission has not been granted; for example,
-"rw-" means the 'execute' permission has not been granted. The next three entries are the permissions for the group, and the last three
-are the permissions for everyone else.
-
-Following the file permissions are the name of the creator, the name of the group, the size of the file, the date it was created, and finally
-the name of the file.
-
-
-## Changing File Permissions
-
-To change file permissions, use the command `chmod [permissions] [filename]`, where permissions are indicated by a three-digit code.
-Each digit corresponds to one of the three permission groups described above: one for the creator, one for the group, and one for everyone else.
-The digits are interpreted as follows: 4=read, 2=write, 1=execute, and any combination of these is given by summing their codes.
-For example, to give the creator of mars.txt the rights to read, write, and execute, the group the rights to read and execute, and everyone
-else only the right to read, we would use the command `chmod 754 mars.txt`
-
-{{< figure src="/images/LinuxChange.png" >}}
diff --git a/content/Data_Storage/_index.md b/content/Data_Storage/_index.md
deleted file mode 100644
index 6fd2d2aa..00000000
--- a/content/Data_Storage/_index.md
+++ /dev/null
@@ -1,10 +0,0 @@
-+++
-title = "Data Storage"
-description = "Storing Data on HCC resources."
-weight = "50" -+++ - -{{% children description="true" %}} - - - diff --git a/content/Data_Storage/preventing_file_loss.md b/content/Data_Storage/preventing_file_loss.md deleted file mode 100644 index ad3e954f..00000000 --- a/content/Data_Storage/preventing_file_loss.md +++ /dev/null @@ -1,170 +0,0 @@ -+++ -title = "Preventing File Loss" -description = "How to prevent file loss on HCC clusters" -weight = 40 -+++ - -Each research group is allocated 50TB of storage in `/work` on HCC -clusters. With over 400 active groups, HCC does not have the resources -to provide regular backups of `/work` without sacrificing the -performance of the existing filesystem. No matter how careful a user -might be, there is always the risk of file loss due to user error, -natural disasters, or equipment failure. - -However, there are a number of solutions available for backing up your -data. By carefully considering the benefits and limitations of each, -users can select the backup methods that work best for their particular -needs. For truly robust file backups, we recommend combining multiple -methods. For example, use Git regularly along with manual backups to an -external hard-drive at regular intervals such as monthly or biannually. - ---- -### 1. Use your local machine: - -If you have sufficient hard drive space, regularly backup your `/work` -directories to your personal computer. To avoid filling up your personal -hard-drives, consider using an external drive that can easily be placed -in a fireproof safe or at an off-site location for an extra level of -protection. To do this, you can either use [Globus -Connect]({{< relref "/Data_Transfer/globus_connect" >}}) or an -SCP client, such -as <a href="https://cyberduck.io/" class="external-link">Cyberduck</a> or <a href="https://winscp.net/eng/index.php" class="external-link">WinSCP</a>. -For help setting up an SCP client, check out our [Quick Start -Guides]({{< relref "/Quickstarts" >}}). - -For those worried about personal hard drive crashes, UNL -offers <a href="http://nsave.unl.edu/" class="external-link">the backup service NSave</a>. -For a small monthly fee, users can install software that will -automatically backup selected files from their personal machine. - -Benefits: - -- Gives you full control over what is backed up and when. -- Doesn't require the use of third party servers (when using SCP - clients). -- Take advantage of our high speed data transfers (10 Gb/s) when using - Globus Connect or [setup your SCP client to use our dedicated high - speed transfer - servers]({{< relref "/Data_Transfer/high_speed_data_transfers" >}}) - -Limitations: - -- The amount you can backup is limited by available hard-drive space. -- Manual backups of many files can be time consuming. - ---- -### 2. Use Git to preserve files and revision history: - -Git is a revision control service which can be run locally or can be -paired with a repository hosting service, such -as <a href="http://www.github.com/" class="external-link">GitHub</a>, to -provide a remote backup of your files. Git works best with smaller files -such as source code and manuscripts. Anyone with an InCommon login can -utilize <a href="http://git.unl.edu/" class="external-link">UNL's GitLab Instance</a>, -for free. - -Benefits: - -- Git is naturally collaboration-friendly, allowing multiple people to - easily work on the same project and provides great built-in tools to - control contributions and managing conflicting changes. 
-- Create individual repositories for each project, allowing you to - compartmentalize your work. -- Using UNL's GitLab instance allows you to create private or internal - (accessible by anyone within your organization) repositories. - -Limitations: - -- Git is not designed to handle large files. GitHub does not allow - files larger than 100MB unless using - their <a href="https://help.github.com/articles/about-git-large-file-storage/" class="external-link">Git Large File Storage</a> and - tracking files over 1GB in size can be time consuming and lead to - errors when using other repository hosts. - ---- -### 3. Use Attic: - -HCC offers -long-term, <a href="https://en.wikipedia.org/wiki/Nearline_storage" class="external-link">near-line</a> data -storage -through [Attic]({{< relref "using_attic" >}}). -HCC users with an existing account -can <a href="http://hcc.unl.edu/attic" class="external-link">apply for an Attic account</a> for -a <a href="http://hcc.unl.edu/priority-access-pricing" class="external-link">small annual fee</a> that -is substantially less than other cloud services. - -Benefits: - -- Attic files are backed up regularly at both HCC locations in Omaha - and Lincoln to help provide disaster tolerance and a second security - layer against file loss. -- No limits on individual or total file sizes. -- High speed data transfers between Attic and the clusters when using - [Globus Connect]({{< relref "/Data_Transfer/globus_connect" >}}) and [HCC's high-speed data - servers]({{< relref "/Data_Transfer/high_speed_data_transfers" >}}). - -Limitations: - -- Backups must be done manually which can be time consuming. Setting - up automated scripts can help speed up this process. - ---- -### 4. Use a cloud-based service, such as Box: - -Many of us are familiar with services such as Google Drive, Dropbox, Box -and OneDrive. These cloud-based services provide a convenient portal for -accessing your files from any computer. NU offers OneDrive and Box -services to all students, staff and faculty. But did you know that you -can link your Box account to HCC’s clusters to provide quick and easy -access to files stored there? [Follow a few set-up -steps]({{< relref "integrating_box_with_hcc" >}}) and -you can add files to and access files stored in your Box account -directly from HCC clusters. Setup your submit scripts to automatically -upload results as they are generated or use it interactively to store -important workflow scripts and maintain a backup of your analysis -results. - -Benefits: - -- <a href="http://box.unl.edu/" class="external-link">Box@UNL</a> offers - unlimited file storage while you are associated with UNL. -- Integrating with HCC clusters provides a quick and easy way to - automate backups of analysis results and workflow scripts. - -Limitations: - -- Box has individual file size limitations, larger files will need to - be backed up using an alternate method. - ---- -### 5. Copy important files to `/home`: - -While `/work` files and directories are not backed up, files and -directories in `/home` are backed up on a daily basis. Due to the -limitations of the `/home` filesystem, we strongly recommend that only -source code and compiled programs are backed up to `/home`. If you do -use `/home` to backup datasets, please keep a working copy in your -`/work` directories to prevent negatively impacting the functionality of -the cluster. - -Benefits: - -- No need to make manual backups. `\home` files are automatically backed - up daily. 
-- Files in `/home` are not subject to the 6 month purge policy that - exists on `/work`. -- Doesn't require the use of third-party software or tools. - -Limitations: - -- Home storage is limited to 20GB per user. Larger files sets will - need to be backed up using an alternate method. -- Home is read-only on the cluster worker nodes so results cannot be - directly written or altered from within a submitted job. - - -If you would like more information or assistance in setting up any of -these methods, contact us -at <a href="mailto:hcc-support@unl.edu" class="external-link">hcc-support@unl.edu</a>. - - diff --git a/content/Data_Storage/using_attic.md b/content/Data_Storage/using_attic.md deleted file mode 100644 index 75ca93fb..00000000 --- a/content/Data_Storage/using_attic.md +++ /dev/null @@ -1,105 +0,0 @@ -+++ -title = "Using Attic" -description = "How to store data on Attic" -weight = 20 -+++ - -For users who need long-term storage for large amount of data, HCC -provides an economical solution called Attic. Attic is a reliable -<a href="https://en.wikipedia.org/wiki/Nearline_storage" class="external-link">near-line data archive</a> storage -system. The files in Attic can be accessed and shared from anywhere -using [Globus -Connect]({{< relref "/Data_Transfer/globus_connect" >}}), -with a fast 10Gb/s link. Also, the data in Attic is backed up between -our Lincoln and Omaha facilities to ensure high availability and -disaster tolerance. The data and user activities on Attic are subject to -our -<a href="http://hcc.unl.edu/hcc-policies" class="external-link">HCC Policies</a>. - ---- -### Accounts and Cost - -To use Attic you will first need an -<a href="https://hcc.unl.edu/new-user-request" class="external-link">HCC account</a>, and -then you may request an -<a href="http://hcc.unl.edu/attic" class="external-link">Attic allocation</a>. -We charge a small fee per TB per year, but it is cheaper than most -commercial cloud storage solutions. For the user application form and -cost, please see the -<a href="http://hcc.unl.edu/attic" class="external-link">HCC Attic page</a>. - ---- -### Transfer Files Using Globus Connect - -The easiest and fastest way to access Attic is via Globus. You can -transfer files between your computer, our clusters ($HOME, $WORK, and $COMMON on -Crane or Rhino), and Attic. Here is a detailed tutorial on -how to set up and use [Globus Connect]({{< relref "/Data_Transfer/globus_connect" >}}). For -Attic, use the Globus Endpoint **hcc\#attic**. Your Attic files are -located at `~, `which is a shortcut -for `/attic/<groupname>/<username>`. -**Note:** *If you are accessing Attic files from your supplementary -group, you should explicitly set the path to -/attic/<supplementary\_groupname>/. If you don't do that, by -default the endpoint will try to place you in your primary group's Attic -path, to which access will be denied if the primary group doesn't have an Attic allocation.* - ---- -### Transfer Files Using SCP/SFTP/RSYNC - -The transfer server for Attic storage is `attic.unl.edu` (or `attic-xfer.unl.edu`). - -{{% panel theme="info" header="SCP Example" %}} -{{< highlight bash >}} -scp /source/file <username>@attic.unl.edu:~/destination/file -{{< /highlight >}} -{{% /panel %}} - -{{% panel theme="info" header="SFTP Example" %}} -{{< highlight bash >}} -sftp <username>@attic.unl.edu -Password: -Duo two-factor login for <username> -Connected to attic.unl.edu. 
-sftp> pwd -Remote working directory: /attic/<groupname>/<username> -sftp> put source/file destination/file -sftp> exit -{{< /highlight >}} -{{% /panel %}} - -{{% panel theme="info" header="RSYNC Example" %}} -{{< highlight bash >}} -# local to remote rsync command -rsync -avz /local/source/path <username>@attic.unl.edu:remote/destination/path - -# remote to local rsync command -rsync -avz <username>@attic.unl.edu:remote/source/path /local/destination/path -{{< /highlight >}} -{{% /panel %}} - -You can also access your data on Attic using our [high-speed -transfer servers]({{< relref "/Data_Transfer/high_speed_data_transfers" >}}) if you prefer. -Simply use scp or sftp to connect to one of the transfer servers, and -your directory is mounted at `/attic/<groupname>/<username>`. - ---- -### Check Attic Usage - -The usage and quota information for your group and the users in the -group are stored in a file named "disk\_usage.txt" in your group's -directory (`/attic/<groupname>`). You can use either [Globus Connect]({{< relref "/Data_Transfer/globus_connect" >}}) or -scp to download it. Your usage and expiration is also shown in the web -interface (see below). - ---- -### Use the web interface - -For convenience, a web interface is also provided. Simply go to -<a href="https://attic.unl.edu" class="external-link">https://attic.unl.edu</a> -and login with your HCC credentials. Using this interface, you can see -your quota usage and expiration, manage files, etc. **Please note we do -not recommend uploading/downloading large files this way**. Use one of -the other transfer methods above for large datasets. - - diff --git a/content/Data_Transfer/_index.md b/content/Data_Transfer/_index.md deleted file mode 100644 index d772f48c..00000000 --- a/content/Data_Transfer/_index.md +++ /dev/null @@ -1,10 +0,0 @@ -+++ -title = "Data Transfer" -description = "Transferring Data to and from HCC resources." -weight = "55" -+++ - -{{% children description="true" %}} - - - diff --git a/content/Data_Transfer/globus_connect/activating_hcc_cluster_endpoints.md b/content/Data_Transfer/globus_connect/activating_hcc_cluster_endpoints.md deleted file mode 100644 index e58f43d9..00000000 --- a/content/Data_Transfer/globus_connect/activating_hcc_cluster_endpoints.md +++ /dev/null @@ -1,39 +0,0 @@ -+++ -title = "Activating HCC Cluster Endpoints" -description = "How to activate HCC endpoints on Globus" -weight = 20 -+++ - -You will not be able to transfer files to or from an HCC endpoint using Globus Connect without first activating the endpoint. Endpoints are available for Crane (`hcc#crane`), Rhino, (`hcc#rhino`), and Attic (`hcc#attic`). Follow the instructions below to activate any of these endpoints and begin making transfers. - -1. [Sign in](https://www.globus.org/SignIn) to your Globus account using your campus credentials or your Globus ID (if you have one). Then click on 'Endpoints' in the left sidebar. -{{< figure src="/images/Glogin.png" >}} -{{< figure src="/images/endpoints.png" >}} - -2. Find the endpoint you want by entering '`hcc#crane`', '`hcc#rhino`', or '`hcc#attic`' in the search box and hit 'enter'. Once you have found and selected the endpoint, click the green 'activate' icon. On the following page, click 'continue'. -{{< figure src="/images/activateEndpoint.png" >}} -{{< figure src="/images/EndpointContinue.png" >}} - -3. You will be redirected to the HCC Globus Endpoint Activation page. Enter your *HCC* username and password (the password you usually use to log into the HCC clusters). 
-{{< figure src="/images/hccEndpoint.png" >}} - -4. Next you will be prompted to - provide your *Duo* credentials. If you use the Duo Mobile app on - your smartphone or tablet, select 'Duo Push'. Once you approve the notification that is sent to your phone, - the activation will be complete. If you use a Yubikey for - authentication, select the 'Passcode' option and then press your - Yubikey to complete the activation. Upon successful activation, you - will be redirected to your Globus *Manage Endpoints* page. -{{< figure src="/images/EndpointPush.png" >}} -{{< figure src="/images/endpointComplete.png" >}} - -The endpoint should now be ready -and will not have to be activated again for the next 7 days. -To transfer files between any two HCC clusters, you will need to -activate both endpoints individually. - -Next, learn how to [make file transfers between HCC endpoints]({{< relref "/Data_Transfer/globus_connect/file_transfers_between_endpoints" >}}) or how to [transfer between HCC endpoints and a personal computer]({{< relref "/Data_Transfer/globus_connect/file_transfers_to_and_from_personal_workstations" >}}). - ---- - - diff --git a/content/Data_Transfer/globus_connect/file_transfers_between_endpoints.md b/content/Data_Transfer/globus_connect/file_transfers_between_endpoints.md deleted file mode 100644 index 85cae545..00000000 --- a/content/Data_Transfer/globus_connect/file_transfers_between_endpoints.md +++ /dev/null @@ -1,50 +0,0 @@ -+++ -title = "File Transfers Between Endpoints" -description = "How to transfer files between HCC clusters using Globus" -weight = 30 -+++ - -To transfer files between HCC clusters, you will first need to -[activate]({{< relref "/Data_Transfer/globus_connect/activating_hcc_cluster_endpoints" >}}) the -two endpoints you would like to use (the available endpoints -are: `hcc#crane` `hcc#rhino`, and `hcc#attic`). Once -that has been completed, follow the steps below to begin transferring -files. (Note: You can also transfer files between an HCC endpoint and -any other Globus endpoint for which you have authorized access. That -may include a [personal -endpoint]({{< relref "/Data_Transfer/globus_connect/file_transfers_to_and_from_personal_workstations" >}}), -a [shared -endpoint]({{< relref "/Data_Transfer/globus_connect/file_sharing" >}}), -or an endpoint on another computing resource or cluster. Once the -endpoints have been activated, the file transfer process is generally -the same regardless of the type of endpoints you use. For demonstration -purposes we use two HCC endpoints.) - -1. Once both endpoints for the desired file transfer have been - activated, [sign in](https://www.globus.org/SignIn) to - your Globus account (if you are not already) and select - "Transfer or Sync to.." from the right sidebar. If you have - a small screen, you may have to click the menu icon - first. -{{< figure src="/images/Transfer.png">}} - -2. Enter the names of the two endpoints you would like to use, or - select from the drop-down menus (for - example, `hcc#attic` and `hcc#crane`). Enter the - directory paths for both the source and destination (the 'from' and - 'to' paths on the respective endpoints). Press 'Enter' to view files - under these directories. Select the files or directories you would - like to transfer (press *shift* or *control* to make multiple - selections) and click the blue highlighted arrow to start the - transfer. -{{< figure src="/images/startTransfer.png" >}} - -3. 
Globus will display a message when your transfer has completed
-   (or in the unlikely event that it was unsuccessful), and you will
-   also receive an email. Select the 'refresh' icon to see your file
-   in the destination folder.
-{{< figure src="/images/transferComplete.png" >}}
-
----
-
-
diff --git a/content/Data_Transfer/high_speed_data_transfers.md b/content/Data_Transfer/high_speed_data_transfers.md
deleted file mode 100644
index 922eb4b4..00000000
--- a/content/Data_Transfer/high_speed_data_transfers.md
+++ /dev/null
@@ -1,28 +0,0 @@
-+++
-title = "High Speed Data Transfers"
-description = "How to transfer files directly from the transfer servers"
-weight = 10
-+++
-
-Crane, Rhino, and Attic each have a dedicated transfer server with
-10 Gb/s connectivity that allows
-for faster data transfers than the login nodes. With [Globus
-Connect]({{< relref "globus_connect" >}}), users
-can take advantage of this connection speed when making large/cumbersome
-transfers.
-
-Those who prefer scp, sftp, or
-rsync clients can also benefit from this high-speed connectivity by
-using these dedicated servers for data transfers:
-
-Cluster | Transfer server
-----------|----------------------
-Crane | `crane-xfer.unl.edu`
-Rhino | `rhino-xfer.unl.edu`
-Attic | `attic-xfer.unl.edu`
-
-{{% notice info %}}
-Because the transfer servers are login-disabled, third-party transfers
-between `crane-xfer` and `attic-xfer` must be done via [Globus Connect]({{< relref "globus_connect" >}}).
-{{% /notice %}}
-
diff --git a/content/Events/2013/HCC_Supercomputing_Symposium_2013.md b/content/Events/2013/HCC_Supercomputing_Symposium_2013.md
index 34a8124b..e69de29b 100644
--- a/content/Events/2013/HCC_Supercomputing_Symposium_2013.md
+++ b/content/Events/2013/HCC_Supercomputing_Symposium_2013.md
@@ -1,31 +0,0 @@
-+++
-title = "HCC Supercomputing Symposium 2013"
-description = "HCC Supercomputing Symposium 2013"
-+++
-
-<strong>Location: Wick Alumni Center, Dana Board Room</strong><br>
-The morning sessions will be interactive – attendees should bring a laptop if at all possible! The first sessions will be primarily in a planned tutorial format, while the “Open Lab” will involve more individualized help for those in attendance, as desired.
-
-| Time | Title | Description |
-|---------|-------------------------------------------------------|------------------------------------------------------------------|
-| 9 - 10am| Getting started at HCC | HCC 101 (Linux Primer, Job submission) [Quick Start Guides](https://hcc.unl.edu/docs/Quickstarts/) |
-| 10 - 11am | Job management | Creating and deploying parallel jobs at HCC, OSG [Submitting Jobs](https://hcc.unl.edu/docs/guides/submitting_jobs/) |
-| 11 - 11:45am | HCC Open Lab | Presentations and help from HCC staff |
-| 12 - 1pm | LUNCH lecture with UNIT | Carl Lundstedt: How Big Data Allows Us to Investigate the Smallest Things |
-| 1:15 – 2pm | New User Spotlight – Steve Kolbe, Theatre | Media Arts Film Rendering @ HCC |
-| 2 - 3pm | State of HCC; Crane Announcement | David Swanson |
-
-1. Getting Started at HCC (9-10am)
-    1. [Getting an account at HCC](https://hcc.unl.edu/new-user-request)
-    2. Logging into HCC resources for [Windows](https://hcc.unl.edu/docs/Quickstarts/connecting/for_windows_users/) or [Mac/Linux](https://hcc.unl.edu/docs/Quickstarts/connecting/for_maclinux_users/) users
-    3. Basic Linux commands
-    4. [Transferring files to HCC](https://hcc.unl.edu/docs/guides/handling_data/)
-2. Job Management (10-11am)
-    1. [End to End MPI](https://hcc.unl.edu/docs/guides/running_applications/mpi_jobs_on_hcc/) example, including how to transfer source code and compile an MPI application on Tusker.
-    2. [Compiling](https://hcc.unl.edu/docs/guides/running_applications/compiling_source_code/compiling_an_openmp_application/) and [submitting](https://hcc.unl.edu/docs/guides/submitting_jobs/submitting_an_openmp_job/) multi-threaded and OpenMP applications.
-    3. Using scheduler features such as [Arrays](https://hcc.unl.edu/docs/guides/submitting_jobs/submitting_a_job_array/) and [Dependencies](https://hcc.unl.edu/docs/guides/submitting_jobs/job_dependencies/).
-    4. Debugging on Tusker by using [interactive submission](https://hcc.unl.edu/docs/guides/submitting_jobs/submitting_an_interactive_job/).
-
diff --git a/content/Events/2013/hcc_condor_workshop_2013.md b/content/Events/2013/hcc_condor_workshop_2013.md
index 1561d15b..e69de29b 100644
--- a/content/Events/2013/hcc_condor_workshop_2013.md
+++ b/content/Events/2013/hcc_condor_workshop_2013.md
@@ -1,50 +0,0 @@
-+++
-title = "HCC Condor Workshop 2013"
-description = "HCC Condor Workshop 2013."
-+++
-
-Condor
-============
-
-Workshop presentation slides
-<[condor\_workshop.pdf](https://unl.box.com/s/ou8tf62bqkbrh7yx0cl4me1zbp3z0j08)>
-
-| Time | Title | Description |
-|---------|-------------------------|------------------------------------------------|
-| 1pm-2pm | Demo/Hands-On Practice | Executing Condor jobs on HCC supercomputers |
-| 2pm-3pm | Individual Consultation | Bring your research code on HCC supercomputers |
-
-The password for all demo accounts is **HCC\_condor2013**.
-Replace `<group name>` with `demo` and `<username>` with `demoXX` (e.g. `demo01`).
-
-- Get started with [For Windows Users](https://hcc.unl.edu/docs/Quickstarts/connecting/for_windows_users/)
-  or [For Mac/Linux Users](https://hcc.unl.edu/docs/Quickstarts/connecting/for_maclinux_users/).
-- Sample code for Fortran/C on HCC:
-  <[serial\_dir.zip](https://unl.box.com/s/khkpt68pe3k0lu2ythn9kzjeva40itdd)>
-- Sample code for Condor on HCC:
-  <[condor\_dir.zip](https://unl.box.com/s/qpvgnqr9ukjcmt0d2trde4qcp5e5ez17)>
-- For more details of this demonstration, see [Condor Jobs on
-  HCC](https://hcc.unl.edu/docs/guides/submitting_jobs/condor_jobs_on_hcc/).
-
-MATLAB
-============
-Workshop presentation slides
-<[matlab\_workshop.pdf](https://unl.box.com/s/ou8tf62bqkbrh7yx0cl4me1zbp3z0j08)>
-
-| Time | Title | Description |
-|---------|-------------------------|---------------------------------------------------|
-| 1pm-2pm | Demo/Hands-On Practice | Implementing a MATLAB code on HCC supercomputers |
-| 2pm-3pm | Individual Consultation | Bring your MATLAB code on HCC supercomputers |
-
-The password for all demo accounts is **HCC\_matlab2013**.
-Replace `<group name>` with `demo` and `<username>` with `demoXX` (e.g. `demo01`).
-
-
-- Get started with [For Windows Users](https://hcc.unl.edu/docs/Quickstarts/connecting/for_windows_users/)
-  or [For Mac/Linux Users](https://hcc.unl.edu/docs/Quickstarts/connecting/for_maclinux_users/).
-- Hands On: [MATLAB on HCC](https://hcc.unl.edu/docs/guides/submitting_jobs/submitting_matlab_jobs/)
-- Sample code for MATLAB on HCC:
-  <[matlab\_dir.zip](https://unl.box.com/s/u19fy7cjeswfl1wi7h1nkeeie8gl3z90)>
-- Sample code for Parallel MATLAB Job:
-  <[parallel\_matlab\_dir.zip](https://unl.box.com/s/fhl3kf6hg8dmtozkphq3u2r58yexrppe)>
-
diff --git a/content/Events/2013/hcc_matlab_workshop_2013.md b/content/Events/2013/hcc_matlab_workshop_2013.md
index cf2df3c8..e69de29b 100644
--- a/content/Events/2013/hcc_matlab_workshop_2013.md
+++ b/content/Events/2013/hcc_matlab_workshop_2013.md
@@ -1,29 +0,0 @@
-+++
-title = "HCC Matlab Workshop 2013"
-description = "HCC Matlab Workshop 2013."
-+++
-
-Workshop presentation slides
-<[matlab\_workshop.pdf](https://unl.box.com/s/lulbnbrnr7xqwufrx5s20gxnyxpykn7j)>
-
-| Time | Title | Description |
-|-------------|-------------------------|--------------------------------------------------|
-| UNMC | | |
-| 10am - 11am | Demo/Hands-On Practice | Implementing a MATLAB code on HCC supercomputers |
-| 11am - 12pm | Individual Consultation | Bring your MATLAB code on HCC supercomputers |
-| UNO | | |
-| 2pm - 3pm | Demo/Hands-On Practice | Implementing a MATLAB code on HCC supercomputers |
-| 3pm - 4pm | Individual Consultation | Bring your MATLAB code on HCC supercomputers |
-
-
-The password for all demo accounts is **HCC\_matlab2013**.
-Replace `<group name>` with `demo` and `<username>` with `demoXX`
-(e.g. `demo01`).
-
-- Get started with [For Windows Users](https://hcc.unl.edu/docs/Quickstarts/connecting/for_windows_users/)
-  or [For Mac/Linux Users](https://hcc.unl.edu/docs/Quickstarts/connecting/for_maclinux_users/).
-- Hands On: [MATLAB on HCC](https://hcc.unl.edu/docs/guides/submitting_jobs/submitting_matlab_jobs/)
-- Sample code for MATLAB on HCC:
-  <[matlab\_dir.zip](https://unl.box.com/s/u19fy7cjeswfl1wi7h1nkeeie8gl3z90)>
-- Sample code for Parallel MATLAB Job:
-  <[parallel\_matlab\_dir.zip](https://unl.box.com/s/fhl3kf6hg8dmtozkphq3u2r58yexrppe)>
diff --git a/content/Events/2013/hcc_mpi_workshop_2013.md b/content/Events/2013/hcc_mpi_workshop_2013.md
index 02dc70cf..e69de29b 100644
--- a/content/Events/2013/hcc_mpi_workshop_2013.md
+++ b/content/Events/2013/hcc_mpi_workshop_2013.md
@@ -1,25 +0,0 @@
-+++
-title = "HCC MPI Workshop 2013"
-description = "HCC MPI Workshop 2013."
-+++
-
-Workshop presentation slides <[mpi\_workshop.pdf](https://unl.box.com/s/7rufq2a39n2vektg19ko9fjcbios4kqp)>
-
-| Time | Title | Description |
-|---------|-------------------------|------------------------------------------------|
-| 1pm-2pm | Demo/Hands-On Practice | Implementing an MPI code on HCC supercomputers |
-| 2pm-3pm | Individual Consultation | Bring your research code on HCC supercomputers |
-
-The password for all demo accounts is **HCC\_mpi2013**.
-Replace `<group name>` with `demo` and `<username>` with `demoXX` (e.g. `demo01`).
-
-- Get started with [For Windows Users](https://hcc.unl.edu/docs/Quickstarts/connecting/for_windows_users/)
-  or [For Mac/Linux Users](https://hcc.unl.edu/docs/Quickstarts/connecting/for_maclinux_users/).
-- Sample code for Fortran/C on HCC:
-  <[serial\_dir.zip](https://unl.box.com/s/khkpt68pe3k0lu2ythn9kzjeva40itdd)>
-- Sample code for MPI on HCC:
-  <[mpi\_dir.zip](https://unl.box.com/s/fekjdxx82gxp3x75nbsrqmrri52d4zlp)>
-- For more details of this demonstration, see [MPI Jobs on
-  HCC](https://hcc.unl.edu/docs/guides/submitting_jobs/submitting_an_mpi_job/).
- - diff --git a/content/Events/2016/hcc_qiime_workshop_2016.md b/content/Events/2016/hcc_qiime_workshop_2016.md index a7662bd5..e69de29b 100644 --- a/content/Events/2016/hcc_qiime_workshop_2016.md +++ b/content/Events/2016/hcc_qiime_workshop_2016.md @@ -1,71 +0,0 @@ -+++ -title = "HCC QIIME Workshop 2016" -description = "HCC QIIME Workshop 2016." -+++ - -When: October 20, 2016 - -Where: NIC Food Innovation Campus room 277 - - 1901 N. 21st St., Lincoln, 68588 - -[Details and Agenda](https://hcc.unl.edu/hcc-qiime-workshop) - ------------------------------------------------------------------------- - -Materials --------- - -Slides for morning -introduction: [QIIME\_2016\_intro.pdf](https://unl.box.com/s/d2o92uoar7x59a7rxl0f3y7o3ncy0swq) - -Software Carpentry Unix Shell -Lessons: [http://eharstad.github.io/shell-novice/](http://eharstad.github.io/shell-novice/) - -Slides for HCC -Overview: [2016\_Qiime.pdf](https://unl.box.com/s/4gryu7ny8lo95fs8clyu566k2ubbhtef) - -QIIME -slides: [QIIME\_Talk\_Oct20.pdf](https://unl.box.com/s/h4zi0h27t1t16u9due3ybfrxmsdebzin) - -QIIME -tutorial: [http://bioinformatics.unl.edu/qiime_tutorial.pdf](http://bioinformatics.unl.edu/qiime_tutorial.pdf) - ------------------------------------------------------------------------- - -**What you need to do before Thursday, October 20th** ---------------------------------------------------- - -*Participants will need to bring a laptop with them to the workshop.* In addition, a few things must be completed before you arrive: - -1. To participate in this workshop, you will need to - [sign up for an HCC account](http://hcc.unl.edu/new-user-request) - (if you do not already have one). Please complete the account - application and [DUO two-factor authentication - setup](https://hcc-docs.unl.edu/display/HCCDOC/Setting+up+and+using+Duo) - before Thursday. This process requires advisor approval (via email) - and a visit to the HCC offices in the Schorr Center to activate your - account, so it can sometimes take a day or more to complete. Please - plan accordingly. If you still do not have your account set up by - Thursday, please arrive at the workshop by 8:30AM to get help. -2. Once you have your HCC account and DUO set up, please make sure that - you are able to log into the HCC clusters (instructions for [Windows - Users](https://hcc.unl.edu/docs/Quickstarts/connecting/for_windows_users/) - and [Mac/Linux - Users](https://hcc.unl.edu/docs/Quickstarts/connecting/for_maclinux_users/)). - If you have any problems logging in, please email us at - [hcc-support@unl.edu](mailto:hcc-support@unl.edu). -3. On Thursday we will be demonstrating how to transfer files using - Globus Connect. Before arriving at the workshop, please log in to - the - [Globus Web App](https://www.globus.org/app/transfer) - using your My.UNL credentials (choose *University of - Nebraska-Lincoln* from the drop-down menu). Next, install the - Globus Connect Personal app on your laptop (directions for - [Mac OS X](https://docs.globus.org/how-to/globus-connect-personal-mac/), - [Linux](https://docs.globus.org/how-to/globus-connect-personal-linux/), - and - [Windows](https://docs.globus.org/how-to/globus-connect-personal-windows/)) - so that you can begin using Globus to transfer data to and from your - laptop.
diff --git a/content/Events/2017/unk_linear_algebra_feb_28th_2017.md b/content/Events/2017/unk_linear_algebra_feb_28th_2017.md index 6be6caaf..e69de29b 100644 --- a/content/Events/2017/unk_linear_algebra_feb_28th_2017.md +++ b/content/Events/2017/unk_linear_algebra_feb_28th_2017.md @@ -1,75 +0,0 @@ -+++ -title = "UNK Linear Algebra, Feb. 28, 2017" -description = "UNK Linear Algebra, Feb. 28, 2017." -+++ - -**If at any time today you have difficulties or become lost, please -place the <span style="color: rgb(255,0,0);">red</span> sticky note on -top of your monitor and a helper will be around to assist you.** - - For these instructions, any commands to be typed into the terminal will be formatted like this. - -**What is a cluster:** ---------------------- - - - -(picture courtesy of: -[http://training.h3abionet.org/technical_workshop_2015/?page_id=403](http://training.h3abionet.org/technical_workshop_2015/?page_id=403)) - -**To connect to the Crane cluster:** ------------------------------------- -- Insert the Yubikey into the computer's USB port. There should be a - small green light in the middle of the Yubikey to indicate it is - inserted correctly. -- Open your preferred web browser and navigate to - [http://go.unl.edu/cranessh](http://go.unl.edu/cranessh) -- Click "Start SSH session to crane.unl.edu" -{{% notice info %}} -The link above is no longer available. If you wish to use a terminal in your browser, Sandstone is an option [https://hcc.unl.edu/docs/guides/sandstone/](https://hcc.unl.edu/docs/guides/sandstone/) -{{% /notice %}} -  -- Click the "Terminal: SSH" icon to begin the SSH session -  -- Type in the provided Username and Password. Note that the password - will not display on screen, but rest assured that even though - nothing is being output, your password is being entered as you type. -- At the "Passcode:" prompt, press your finger to the gold circle in - the middle of the Yubikey until a string of characters appears on - screen. -  -- If you logged in successfully, your screen should look similar to - the one below -  - -**Linux Commands Reference List:** ---------------------------------- -https://hcc.unl.edu/docs/Quickstarts/connecting/basic_linux_commands/ -**To run MATLAB interactively:** -------------------------------- -- After logging into the cluster, navigate to your $WORK directory: - - cd $WORK - -- Request an interactive job: - - srun --reservation=unk --mem=4096 --pty $SHELL - -- Load the MATLAB module: - - module load matlab - -- Run MATLAB: - - matlab - -**To access the MATLAB Tutorial:** ---------------------------------- - -- Navigate to your $WORK directory: - - cd $WORK - -- Clone the GitHub repo containing the tutorial files: - - git clone https://github.com/unlhcc/HCCWorkshops.git diff --git a/content/Events/2017/unl_r_for_biologists_class_march_8_2017.md b/content/Events/2017/unl_r_for_biologists_class_march_8_2017.md index 1be4d7eb..e69de29b 100644 --- a/content/Events/2017/unl_r_for_biologists_class_march_8_2017.md +++ b/content/Events/2017/unl_r_for_biologists_class_march_8_2017.md @@ -1,114 +0,0 @@ -+++ -title = "R for Biologists, March 8, 2017" -description = "R for Biologists, March 8, 2017." -+++ - -**We will be utilizing <span style="color: rgb(255,0,0);">red</span> and -<span style="color: rgb(51,153,102);">green</span> sticky notes today.**
-**If you run into problems or have questions, -please place the <span style="color: rgb(255,0,0);">red</span> sticky -note to the back of your computer screen and a helper will assist you.** - - -If you have not already requested an HCC account under the rcourse998 -group, please do so -[here](https://hcc.unl.edu/new-user-request). - -If you already have an HCC account and need to be added to the -rcourse998 group, please let us know. - -If you have not previously set up Duo Authentication, please ask for -assistance. - - - -**Set up Instructions:** - -**Windows:** - -For Windows, we will use two third-party -applications, **PuTTY** and **WinSCP**, for demonstration. - -PuTTY: -<[http://www.putty.org/](http://www.putty.org/)> - -WinSCP: -<[http://winscp.net/eng/download.php](http://winscp.net/eng/download.php)> - -**Mac/Linux:** - -Mac and Linux users will need to download and install **Cyberduck**. -Detailed information for downloading and setting up Cyberduck can be -found here: [Cyberduck](https://cyberduck.io/) - - - -**Linux Commands Reference List:** - -[https://hcc.unl.edu/docs/Quickstarts/connecting/basic_linux_commands/](https://hcc.unl.edu/docs/Quickstarts/connecting/basic_linux_commands/) - - - - **R core and RStudio:** - -We will be writing scripts offline in RStudio and then uploading them to -execute them on the cluster. This lesson assumes you have the R core and -RStudio installed. If you do not, you can install them here: - -R -core: [https://cloud.r-project.org/](https://cloud.r-project.org/) - -RStudio: [https://www.rstudio.com/products/rstudio/download/](https://www.rstudio.com/products/rstudio/download/) - - - -**Required Packages:** - -We will also be using the dplyr, ggplot2, and maps packages. If you do not -have these installed, please install them now. You can do so using the -following commands inside the RStudio console: - - install.packages("dplyr") - - install.packages("ggplot2") - - install.packages("maps") - - - -**What is a cluster:** - - - -(picture courtesy -of: [http://training.h3abionet.org/technical_workshop_2015/?page_id=403](http://training.h3abionet.org/technical_workshop_2015/?page_id=403)) - -### To download the tutorial files: - -- Navigate to your $WORK directory: - - cd $WORK - -- Clone the GitHub repo containing the tutorial files: - - git clone https://github.com/unlhcc/HCCWorkshops.git - - - -Take-Home Exercise: - -[Data Analysis in R](https://unl.box.com/s/8i647f8are21tc11la0jqk2xddlg19wy) - Please note -that at the bottom of page three, there is a missing parenthesis at -the end of the last command. - -The final code chunk should read: - - # Calculate flight age using birthmonth - - age <- data.frame(names(acStart), acStart, stringsAsFactors=FALSE) - - colnames(age) <- c("TailNum", "acStart") - - flights <- left_join(flights, age, by="TailNum") - - flights <- mutate(flights, Age = (flights$Year * 12) + flights$Month - flights$acStart) \ No newline at end of file diff --git a/content/Events/_index.md b/content/Events/_index.md index c225c4e9..16ad5fd8 100644 --- a/content/Events/_index.md +++ b/content/Events/_index.md @@ -2,6 +2,7 @@ title = "Events" description = "Historical listing of various HCC events."
weight = "70" +hidden = true +++ Historical listing of HCC Events diff --git a/content/FAQ/_index.md b/content/FAQ/_index.md index eddc1c1a..e69de29b 100644 --- a/content/FAQ/_index.md +++ b/content/FAQ/_index.md @@ -1,177 +0,0 @@ -+++ -title = "FAQ" -description = "HCC Frequently Asked Questions" -weight = "10" -+++ - -- [I have an account, now what?](#i-have-an-account-now-what) -- [How do I change my password?](#how-do-i-change-my-password) -- [I forgot my password, how can I retrieve it?](#i-forgot-my-password-how-can-i-retrieve-it) -- [I just deleted some files and didn't mean to! Can I get them back?](#i-just-deleted-some-files-and-didn-t-mean-to-can-i-get-them-back) -- [How do I (re)activate Duo?](#how-do-i-re-activate-duo) -- [How many nodes/memory/time should I request?](#how-many-nodes-memory-time-should-i-request) -- [I am trying to run a job but nothing happens?](#i-am-trying-to-run-a-job-but-nothing-happens) -- [I keep getting the error "slurmstepd: error: Exceeded step memory limit at some point." What does this mean and how do I fix it?](#i-keep-getting-the-error-slurmstepd-error-exceeded-step-memory-limit-at-some-point-what-does-this-mean-and-how-do-i-fix-it) -- [I want to talk to a human about my problem. Can I do that?](#i-want-to-talk-to-a-human-about-my-problem-can-i-do-that) - ---- - -#### I have an account, now what? - -Congrats on getting an HCC account! Now you need to connect to a Holland -cluster. To do this, we use an SSH connection. SSH stands for Secure -Shell, and it allows you to securely connect to a remote computer and -operate it just like you would a personal machine. - -Depending on your operating system, you may need to install software to -make this connection. Check out on Quick Start Guides for information on -how to install the necessary software for your operating system - -- [For Mac/Linux Users]({{< relref "for_maclinux_users" >}}) -- [For Windows Users]({{< relref "for_windows_users" >}}) - -#### How do I change my password? - -#### I forgot my password, how can I retrieve it? - -Information on how to change or retrieve your password can be found on -the documentation page: [How to change your -password]({{< relref "/Accounts/how_to_change_your_password" >}}) - - -All passwords must be at least 8 characters in length and must contain -at least one capital letter and one numeric digit. Passwords also cannot -contain any dictionary words. If you need help picking a good password, -consider using a (secure!) password generator such as -[this one provided by Random.org](https://www.random.org/passwords) - -To preserve the security of your account, we recommend changing the -default password you were given as soon as possible. - -#### I just deleted some files and didn't mean to! Can I get them back? - -That depends. Where were the files you deleted? - -**If the files were in your $HOME directory (/home/group/user/):** It's -possible. - -$HOME directories are backed up daily and we can restore your files as -they were at the time of our last backup. Please note that any changes -made to the files between when the backup was made and when you deleted -them will not be preserved. To have these files restored, please contact -HCC Support at -{{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu) -as soon as possible. - -**If the files were in your $WORK directory (/work/group/user/):** No. - -Unfortunately, the $WORK directories are created as a short term place -to hold job files. 
This storage was designed to be quickly and easily -accessed by our worker nodes and as such is not conducive to backups. -Any irreplaceable files should be backed up in a secondary location, -such as Attic, the cloud, or your personal machine. For more -information on how to prevent file loss, check out [Preventing File -Loss]({{< relref "preventing_file_loss" >}}). - -#### How do I (re)activate Duo? - -**If you have not activated Duo before:** - -Please stop by -[our offices](http://hcc.unl.edu/location) -along with a photo ID and we will be happy to activate it for you. If -you are not local to Omaha or Lincoln, contact us at -{{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu) -and we will help you activate Duo remotely. - -**If you have activated Duo previously but now have a different phone -number:** - -Stop by our offices along with a photo ID and we can help you reactivate -Duo and update your account with your new phone number. - -**If you have activated Duo previously and have the same phone number:** - -Email us at -{{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu) -from the email address your account is registered under and we will send -you a new link that you can use to activate Duo. - -#### How many nodes/memory/time should I request? - -**Short answer:** We don’t know. - -**Long answer:** The amount of resources required is highly dependent on -the application you are using, the input file sizes and the parameters -you select. Sometimes it can help to speak with someone else who has -used the software before to see if they can give you an idea of what has -worked for them. - -But ultimately, it comes down to trial and error; try different -combinations and see what works and what doesn’t. Good practice is to -check the output and utilization of each job you run. This will help you -determine what parameters you will need in the future. - -For more information on how to determine how many resources a completed -job used, check out the documentation on [Monitoring Jobs]({{< relref "monitoring_jobs" >}}). - -#### I am trying to run a job but nothing happens? - -Where are you trying to run the job from? You can check this by typing -the command \`pwd\` into the terminal. - -**If you are running from inside your $HOME directory -(/home/group/user/)**: - -Move your files to your $WORK directory (/work/group/user) and resubmit -your job. - -The worker nodes on our clusters have read-only access to the files in -$HOME directories. This means that when a job is submitted from $HOME, -the scheduler cannot write the output and error files in the directory -and the job is killed. It appears the job does nothing because no output -is produced. - -**If you are running from inside your $WORK directory:** - -Contact us at -{{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu) -with your login, the name of the cluster you are running on, and the -full path to your submit script and we will be happy to help solve the -issue. - -#### I keep getting the error "slurmstepd: error: Exceeded step memory limit at some point." What does this mean and how do I fix it? - -This error occurs when the job you are running uses more memory than was -requested in your submit script. - -If you specified `--mem` or `--mem-per-cpu` in your submit script, try -increasing this value and resubmitting your job.
- -If you did not specify `--mem` or `--mem-per-cpu` in your submit script, -chances are the default amount allotted is not sufficient. Add the line - -{{< highlight batch >}} -#SBATCH --mem=<memory_amount> -{{< /highlight >}} - -to your script with a reasonable amount of memory and try running it again. If you keep -getting this error, continue to increase the requested memory amount and -resubmit the job until it finishes successfully. - -For additional details on how to monitor usage on jobs, check out the -documentation on [Monitoring Jobs]({{< relref "monitoring_jobs" >}}). - -If you continue to run into issues, please contact us at -{{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu) -for additional assistance. - -#### I want to talk to a human about my problem. Can I do that? - -Of course! We have an open door policy and invite you to stop by -[either of our offices](http://hcc.unl.edu/location) -anytime Monday through Friday between 9 am and 5 pm. One of the HCC -staff would be happy to help you with whatever problem or question you -have. Alternatively, you can drop one of us a line and we'll arrange a -time to meet: [Contact Us](https://hcc.unl.edu/contact-us). - diff --git a/content/Guides/_index.md b/content/Guides/_index.md deleted file mode 100644 index 11341179..00000000 --- a/content/Guides/_index.md +++ /dev/null @@ -1,10 +0,0 @@ -+++ -title = "Guides" -weight = "20" -draft = true -+++ - -In-depth guides to using HCC resources -------------------------------------- - -{{% children description="true" %}} diff --git a/content/Quickstarts/_index.md b/content/Quickstarts/_index.md deleted file mode 100755 index 95f2d05e..00000000 --- a/content/Quickstarts/_index.md +++ /dev/null @@ -1,10 +0,0 @@ -+++ -title = "Quickstarts" -weight = "15" -+++ - -The quick start guides require that you already have an HCC account. You -can get an HCC account by applying on the -[HCC website](http://hcc.unl.edu/newusers/) - -{{% children %}} diff --git a/content/Quickstarts/connecting/_index.md b/content/Quickstarts/connecting/_index.md deleted file mode 100644 index 35324ea6..00000000 --- a/content/Quickstarts/connecting/_index.md +++ /dev/null @@ -1,20 +0,0 @@ -+++ -title = "How to Connect" -description = "What is a cluster and what is HPC" -weight = "9" -+++ -High-Performance Computing is the use of groups of computers to solve computations that a user or group would not be able to solve in a reasonable time frame on their own desktop or laptop. This is often achieved by splitting one large job amongst numerous cores or 'workers'. This is similar to how a skyscraper is built by numerous individuals rather than a single person. Many fields take advantage of HPC, including bioinformatics, chemistry, materials engineering, and newer fields such as educational psychology and philosophy. -{{< figure src="/images/cluster.png" height="450" >}} -HPC clusters consist of four primary parts: the login node, the management node, the workers, and a central storage array. All of these parts are bound together with a scheduler such as HTCondor or SLURM. -</br></br> -#### Login Node: -Users will automatically land on the login node when they log in to the clusters. You will [submit jobs]({{< ref "/Submitting_Jobs" >}}) using one of the schedulers and pull the results of your jobs. Jobs run directly on the login node will be stopped so that others can use the login node to submit jobs.
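As a minimal sketch of that workflow (assuming a SLURM-managed cluster; `my_analysis.exe` and `myjob.slurm` are hypothetical names for your own program and submit script):

{{< highlight bash >}}
# Launching a long computation directly on the login node will be stopped:
$ ./my_analysis.exe
# Instead, describe the work in a submit script and hand it to the scheduler:
$ sbatch myjob.slurm
{{< /highlight >}}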
-</br></br> -#### Management Node: -The management node does as its name suggests: it manages the cluster and provides a central point of control for the rest of the systems. -</br></br> -#### Worker Nodes: -The worker nodes run and process the jobs submitted through the schedulers. Through the schedulers, work is done efficiently by packing as many jobs as possible onto the nodes based on the resources each job requests. They also provide fair-use computing by ensuring no single user or group occupies the entire cluster at once, leaving it available to others. -</br></br> -#### Central Storage Array: -The central storage array allows all of the nodes within the cluster to have access to the same files without needing to transfer them around. HCC has three arrays mounted on the clusters; more details are available [here]({{< ref "/Data_Storage/data_storage_overview" >}}). diff --git a/content/Quickstarts/connecting/how_to_change_your_password.md b/content/Quickstarts/connecting/how_to_change_your_password.md deleted file mode 100755 index 49032b7b..00000000 --- a/content/Quickstarts/connecting/how_to_change_your_password.md +++ /dev/null @@ -1,95 +0,0 @@ -+++ -title = "Changing Your Password" -description = "How to change your HCC password" -weight = "30" -+++ - -How to change your password --------------------------- - -{{% notice info%}} -**Your account must be active with Duo authentication set up in order for -the following instructions to work.** -{{% /notice %}} - -- [HCC password requirements](#hcc-password-requirements) -- [Changing a known HCC password](#changing-a-known-hcc-password) - - [Change your password via the command line](#change-your-password-via-the-command-line) - - [Change your password using the myHCC User Portal](#change-your-password-using-the-myhcc-user-portal) -- [Resetting a forgotten HCC password](#resetting-a-forgotten-hcc-password) -- [Tutorial Video](#tutorial-video) - -Every HCC user has a password that is the same on all HCC machines -(Crane, Rhino, Anvil). This password needs to satisfy the HCC -password requirements. - -### HCC password requirements - -All HCC passwords must meet the following requirements when they are -created or changed: - -- at least 8 characters long -- at least 1 capital letter -- at least 1 number -- **can not** contain an existing dictionary word -- **can not** contain information from the user's account details (*username*, - *email*) - -Using special characters in the password is not required, but it is -allowed. Also, the system keeps a history of the user's previous two -passwords, and their reuse is not allowed. - -### Changing a known HCC password - -If you know your current HCC password, you can use one of two approaches -to change it: - -#### Change your password via the command line - -To change a current or temporary password, the user needs to log in to -any HCC cluster and use the ***passwd*** command: - -**Change HCC password** - -{{< highlight bash >}} -$ passwd -Changing password for user <username>. -Current Password: -New password: -Retype new password: -{{< /highlight >}} - -With this command, the user is first prompted for his/her old password. -If the "*Current Password*" is correct, then the user is asked twice for -a replacement password ("*New password*"). The replacement password -needs to fulfill the HCC password requirements. - -#### Change your password using the myHCC User Portal - -1. Log in to the [myHCC User Portal](https://hcc.unl.edu/myhcc) - with your HCC credentials. -2.
Click **Update Account** in the top menu. - - {{< figure src="/images/35326617.png" height="150" >}} - -3. Enter your new password in the **Password** and **Retype Password** - boxes and click **Modify** to save. - - {{< figure src="/images/35326618.png" height="150" >}} - -### Resetting a forgotten HCC password - -To reset your password, navigate to the [myHCC User Portal](https://hcc.unl.edu/myhcc/) in your browser. - -Click the link to reset your forgotten password. - -{{< figure src="/images/35326619.png" height="400" >}} - -Fill in the requested information (your HCC username and email -associated with your account) and click **Reset Password**. A reset link -will be sent to your email address. Click this link and follow the -onscreen prompts to set a new password. - -### Tutorial Video - -{{< youtube eaTW6FDhpsM >}} diff --git a/content/Quickstarts/setting_up_and_using_duo.md b/content/Quickstarts/setting_up_and_using_duo.md deleted file mode 100755 index 04f0847d..00000000 --- a/content/Quickstarts/setting_up_and_using_duo.md +++ /dev/null @@ -1,146 +0,0 @@ -+++ -title = "Setting Up and Using Duo" -description = "Duo Setup Instructions" -weight = "8" -+++ - -##### Use of Duo two-factor authentication (https://www.duosecurity.com) is required for access to HCC resources. - -Users will connect via SSH and enter their username/passwords as usual. One additional -authentication step through Duo is then needed before the login is completed. This -second authentication can be in several different forms (cell phone, YubiKey hardware token), -and is user-selectable at each login. A brief description of each is provided -[below](#duo-authentication-methods). See the -[Duo Authentication Methods](https://www.duosecurity.com/authentication-methods) -page for more details. - -Initial Setup ------------- - -Most HCC account holders use the Duo Mobile application on their -smartphone or purchase a YubiKey USB device. - -### Smartphone - -1. Install the free **Duo Mobile** application from the - [Google Play Store](https://play.google.com/store/apps/details?id=com.duosecurity.duomobile), [Apple App Store](https://itunes.apple.com/us/app/duo-mobile/id422663827), or [Microsoft Store](https://www.microsoft.com/en-us/store/apps/duo-mobile/9nblggh08m1g) -2. Visit one of the following locations. **Bring your smartphone and a valid photo ID** such as your university ID card or driver's license. - 1. Visit either HCC location [118 Schorr Center, UNL](http://www1.unl.edu/tour/SHOR) | - [152 Peter Kiewit Institute, UNO](http://pki.nebraska.edu/new/pages/about-pki/maps-directions-and-parking) in-person anytime from 9am-5pm to enroll. - 2. Visit Information Technology Services [115 Otto Olsen, UNK](http://www.unk.edu/campus-map/?q=m15) - in-person and ask for HCC identity verification. - -Faculty/staff members with a verified NU telephone number can enroll by -phone. If you would like an HCC staff member to call your NU telephone -number to enroll, please email -{{< icon name="envelope" >}}[hcc-support@unl.edu](mailto:hcc-support@unl.edu) -with a time you will be available. - -### YubiKeys - -YubiKey devices are currently a one-time cost of around $25 from HCC, or can be -purchased from Yubico and added in-person at either HCC location. -Purchasing a YubiKey from HCC must be done via a University cost object -transfer (HCC cannot accept cash or credit cards). Please bring the cost -object number with you if possible. YubiKeys are also available from the -Husker Tech store in the UNL City Union.
Note that -YubiKeys are configured for HCC's Duo, and not for general YubiCloud or -U2F use. - -Example login using Duo Push ---------------------------- - -This demonstrates an example login to Crane using the Duo Push method. -Using another method (SMS, phone call, etc.) proceeds in the same way. - (Click on any image for a larger version.) - -First, a user connects via SSH using their normal HCC username/password, -exactly as before. - -{{< figure src="/images/5832713.png" width="600" >}} - -{{% notice warning%}}**Account lockout** - -After 10 failed authentication attempts, the user's account is -disabled. If this is the case, then the user needs to send an email to -[hcc-support@unl.edu](mailto:hcc-support@unl.edu) -including his/her username and the reason why multiple failed -authentication attempts occurred. -{{% /notice %}} - -After entering the password, instead of completing the login, the user -will be presented with the Duo prompt. This gives the choice to use any -authentication method that the particular account is set up to use. In -this example, the choices are Duo Push notification, SMS message, or -phone call. After choosing option 1 for Duo Push, a request to verify the -login is sent to the user's smartphone. - -{{< figure src="/images/5832716.png" height="350" >}} - -Simply tap `Approve` to verify the login. - -{{< figure src="/images/5832717.png" height="350" >}} - -{{% notice warning%}}**If you receive a verification request you didn't initiate, deny the -request and contact HCC immediately via email at -[hcc-support@unl.edu](mailto:hcc-support@unl.edu)** -{{% /notice %}} - -In the terminal, the login will now complete and the user will be logged in -as usual. - -{{< figure src="/images/5832714.png" height="350" >}} - - -Duo Authentication Methods ------------------------- - -### Duo Push -##### [[Watch the Duo Push Demo]](https://www.duosecurity.com/duo-push) - -{{< figure src="/images/5832709.png" height="350" caption="Photo credit: https://duosecurity.com" >}} - -For smartphone or tablet users (iPhone, Android, Blackberry, Windows -Phone), the Duo Mobile app is available for free. A push notification -will be sent to the device, and users can simply confirm the login with -one tap. - -### Duo Mobile Passcodes - -{{< figure src="/images/5832711.png" height="350" caption="Photo credit: https://duosecurity.com" >}} - -The Duo Mobile app can also be used to generate numeric passcodes, even -when internet and cell service are unavailable. Press the key icon to -generate a passcode. The passcode is then entered manually at the login -prompt to complete authentication. - -### SMS Passcodes - - -{{< figure src="/images/5832712.png" height="350" >}} - -For non-smartphone users, Duo can send passcodes via normal text -messages, which are entered manually to complete login. Please note that since -this is an SMS message, it may not be free, depending on the details of -the particular cell phone plan. - -### Phone Callback - -For users with cell phones who prefer not to use any of the above -methods and for those with landline phones, Duo will call the phone and -provide a passcode via automatic voice message. The passcode is then -entered manually to complete the login. - -### YubiKey -##### [[Yubico]](http://www.yubico.com/) - -{{< figure src="/images/5832710.jpg" height="200" caption="Photo credit: Yubico" >}} -YubiKeys are USB hardware tokens that generate passcodes when pressed.
-They appear as a USB keyboard to the computer they are connected to, and -so require no driver software with almost all modern operating systems. -YubiKeys are available from the Husker Tech store at UNL. Users may also purchase them directly from -[Yubico](https://store.yubico.com) if desired; this does require stopping -by either HCC location in person to have the YubiKey added to the user's account. -For your convenience, HCC often carries some YubiKeys as well; these may only be purchased via a -Cost Object transfer. diff --git a/content/Submitting_Jobs/_index.md b/content/Submitting_Jobs/_index.md deleted file mode 100644 index 9ac9e76b..00000000 --- a/content/Submitting_Jobs/_index.md +++ /dev/null @@ -1,197 +0,0 @@ -+++ -title = "Submitting Jobs" -description = "How to submit jobs to HCC resources" -weight = "35" -+++ - -Crane and Rhino are managed by -the [SLURM](https://slurm.schedmd.com) resource manager. -In order to run processing on Crane or Rhino, you -must create a SLURM script that describes your processing. After -submitting the job, SLURM will schedule your processing on an available -worker node. - -Before writing a submit file, you may need to compile your application. - -- [Ensure proper working directory for job output](#ensure-proper-working-directory-for-job-output) -- [Creating a SLURM Submit File](#creating-a-slurm-submit-file) -- [Submitting the job](#submitting-the-job) -- [Checking Job Status](#checking-job-status) - - [Checking Job Start](#checking-job-start) -- [Next Steps](#next-steps) - - -### Ensure proper working directory for job output - -{{% notice info %}} -All SLURM job output should be directed to your /work path. -{{% /notice %}} - -{{% panel theme="info" header="Manual specification of /work path" %}} -{{< highlight bash >}} -$ cd /work/[groupname]/[username] -{{< /highlight >}} -{{% /panel %}} - -The environment variable `$WORK` can also be used. -{{% panel theme="info" header="Using environment variable for /work path" %}} -{{< highlight bash >}} -$ cd $WORK -$ pwd -/work/[groupname]/[username] -{{< /highlight >}} -{{% /panel %}} - -Review how /work differs from /home [here]({{< relref "/guides/handling_data/_index.md" >}}). - -### Creating a SLURM Submit File - -{{% notice info %}} -The below example is for a serial job. For submitting MPI jobs, please -look at the [MPI Submission Guide]({{< relref "submitting_an_mpi_job" >}}). -{{% /notice %}} - -A SLURM submit file is broken into two sections: the job description and -the processing. SLURM job description lines are prefixed with `#SBATCH` in -the submit file. - -**SLURM Submit File** - -{{< highlight batch >}} -#!/bin/sh -#SBATCH --time=03:15:00 # Run time in hh:mm:ss -#SBATCH --mem-per-cpu=1024 # Maximum memory required per CPU (in megabytes) -#SBATCH --job-name=hello-world -#SBATCH --error=/work/[groupname]/[username]/job.%J.err -#SBATCH --output=/work/[groupname]/[username]/job.%J.out - -module load example/test - -hostname -sleep 60 -{{< /highlight >}} - -- **time** - Maximum walltime the job can run. After this time has expired, the - job will be stopped. -- **mem-per-cpu** - Memory that is allocated per core for the job. If you exceed this - memory limit, your job will be stopped. -- **mem** - Specify the real memory required per node in megabytes. If you - exceed this limit, your job will be stopped. Note that you - should ask for less memory than each node actually has. For - instance, Rhino has 1TB, 512GB, 256GB, and 192GB of RAM per node.
You may - only request 1000GB of RAM for the 1TB node, 500GB of RAM for the - 512GB nodes, 250GB of RAM for the 256GB nodes, and 187.5GB for the 192GB nodes. - For Crane, the max is 500GB. -- **job-name** - The name of the job. Will be reported in the job listing. -- **partition** - The partition the job should run in. Partitions determine the job's - priority and which nodes the job can run on. See the - [Partitions]({{< relref "partitions" >}}) page for a list of possible partitions. -- **error** - Location where the stderr of the job will be written. `[groupname]` - and `[username]` should be replaced with your group name and username. - Your username can be retrieved with the command `id -un` and your - group with `id -ng`. -- **output** - Location where the stdout of the job will be written. - -More advanced submit commands can be found on the [SLURM Docs](https://slurm.schedmd.com/sbatch.html). -You can also find an example of an MPI submission on [Submitting an MPI Job]({{< relref "submitting_an_mpi_job" >}}). - -### Submitting the job - -Submitting the SLURM job is done with the command `sbatch`. SLURM will read -the submit file, and schedule the job according to the description in -the submit file. - -The job described above is submitted as follows: - -{{% panel theme="info" header="SLURM Submission" %}} -{{< highlight batch >}} -$ sbatch example.slurm -Submitted batch job 24603 -{{< /highlight >}} -{{% /panel %}} - -The job was successfully submitted. - -### Checking Job Status - -Job status is found with the command `squeue`. It will provide -information such as: - -- The State of the job: - - **R** - Running - - **PD** - Pending - Job is awaiting resource allocation. - - Additional codes are available - on the [squeue](http://slurm.schedmd.com/squeue.html) - page. -- Job Name -- Run Time -- Nodes running the job - -Checking the status of the job is easiest by filtering by your username, -using the `-u` option to squeue. - -{{< highlight batch >}} -$ squeue -u <username> - JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON) - 24605 batch hello-wo <username> R 0:56 1 b01 -{{< /highlight >}} - -Additionally, if you want to see the status of a specific partition, for -example if you are part of a [partition]({{< relref "partitions" >}}), -you can use the `-p` option to `squeue`: - -{{< highlight batch >}} -$ squeue -p esquared - JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON) - 73435 esquared MyRandom tingting R 10:35:20 1 ri19n10 - 73436 esquared MyRandom tingting R 10:35:20 1 ri19n12 - 73735 esquared SW2_driv hroehr R 10:14:11 1 ri20n07 - 73736 esquared SW2_driv hroehr R 10:14:11 1 ri20n07 -{{< /highlight >}} - -#### Checking Job Start - -You may view the start time of your job with the -command `squeue --start`. The output of the command will show the -expected start time of the jobs.
- -{{< highlight batch >}} -$ squeue --start --user lypeng - JOBID PARTITION NAME USER ST START_TIME NODES NODELIST(REASON) - 5822 batch Starace lypeng PD 2013-06-08T00:05:09 3 (Priority) - 5823 batch Starace lypeng PD 2013-06-08T00:07:39 3 (Priority) - 5824 batch Starace lypeng PD 2013-06-08T00:09:09 3 (Priority) - 5825 batch Starace lypeng PD 2013-06-08T00:12:09 3 (Priority) - 5826 batch Starace lypeng PD 2013-06-08T00:12:39 3 (Priority) - 5827 batch Starace lypeng PD 2013-06-08T00:12:39 3 (Priority) - 5828 batch Starace lypeng PD 2013-06-08T00:12:39 3 (Priority) - 5829 batch Starace lypeng PD 2013-06-08T00:13:09 3 (Priority) - 5830 batch Starace lypeng PD 2013-06-08T00:13:09 3 (Priority) - 5831 batch Starace lypeng PD 2013-06-08T00:14:09 3 (Priority) - 5832 batch Starace lypeng PD N/A 3 (Priority) -{{< /highlight >}} - -The output shows the expected start time of the jobs, as well as the -reason that the jobs are currently idle (in this case, low priority of -the user due to running numerous jobs already). - -#### Removing the Job - -Removing the job is done with the `scancel` command. The only argument -to the `scancel` command is the job ID. For the job above, the command -is: - -{{< highlight batch >}} -$ scancel 24605 -{{< /highlight >}} - -### Next Steps - -{{% children %}} diff --git a/content/Submitting_Jobs/condor_jobs_on_hcc.md b/content/Submitting_Jobs/condor_jobs_on_hcc.md deleted file mode 100644 index 1b9db0ae..00000000 --- a/content/Submitting_Jobs/condor_jobs_on_hcc.md +++ /dev/null @@ -1,219 +0,0 @@ -+++ -title = "Condor Jobs on HCC" -description = "How to run jobs using Condor on HCC machines" -weight = "54" -+++ - -This quick start demonstrates how to run multiple copies of a Fortran/C program -using Condor on HCC supercomputers. The sample codes and submit scripts -can be downloaded from [condor_dir.zip](/attachments/3178558.zip). - -#### Login to an HCC Cluster - -Log in to an HCC cluster through PuTTY ([For Windows Users]({{< relref "/Quickstarts/connecting/for_windows_users">}})) or Terminal ([For Mac/Linux Users]({{< relref "/Quickstarts/connecting/for_maclinux_users">}})) and make a subdirectory called `condor_dir` under the `$WORK` directory. In the subdirectory `condor_dir`, create job subdirectories that host the input data files. Here we create two job subdirectories, `job_0` and `job_1`, and put a data file (`data.dat`) in each subdirectory. The data file in `job_0` has a column of data listing the integers from 1 to 5. The data file in `job_1` has an integer list from 6 to 10. - -{{< highlight bash >}} -$ cd $WORK -$ mkdir condor_dir -$ cd condor_dir -$ mkdir job_0 -$ mkdir job_1 -{{< /highlight >}} - -In the subdirectory `condor_dir`, save all the relevant codes. Here we -include two demo programs, `demo_f_condor.f90` and `demo_c_condor.c`, -that compute the sum of the data stored in each job subdirectory -(`job_0` and `job_1`). The parallelization scheme here is as follows. First, the master computer node sends out many copies of the -executable from the `condor_dir` subdirectory and a copy of the data -file to each job subdirectory. The number of executable copies is -specified in the submit script (`queue`), and it usually matches -the number of job subdirectories. Next, the workload is distributed -among a pool of worker computer nodes. At any given time, the number of -available worker nodes may vary. Each worker node executes the jobs -independently of the other worker nodes. The output files are stored separately -in each job subdirectory.
No additional coding is needed to turn -the serial code "parallel". Parallelization here is achieved -through the submit script. - -{{%expand "demo_f_condor.f90" %}} -{{< highlight fortran >}} -Program demo_f_condor - implicit none - integer, parameter :: N = 5 - real*8 w - integer i - common/sol/ x - real*8 x - real*8, dimension(N) :: y_local - real*8, dimension(N) :: input_data - - open(10, file='data.dat') - - do i = 1,N - read(10,*) input_data(i) - enddo - - do i = 1,N - w = input_data(i)*1d0 - call proc(w) - y_local(i) = x - write(6,*) 'i,x = ', i, y_local(i) - enddo - write(6,*) 'sum(y) =',sum(y_local) -Stop -End Program -Subroutine proc(w) - real*8, intent(in) :: w - common/sol/ x - real*8 x - - x = w - -Return -End Subroutine -{{< /highlight >}} -{{% /expand %}} - - -{{%expand "demo_c_condor.c" %}} -{{< highlight c >}} -//demo_c_condor -#include <stdio.h> - -double proc(double w){ - double x; - x = w; - return x; -} - -int main(int argc, char* argv[]){ - int N=5; - double w; - int i; - double x; - double y_local[N]; - double sum; - double input_data[N]; - FILE *fp; - fp = fopen("data.dat","r"); - for (i = 1; i<= N; i++){ - fscanf(fp, "%lf", &input_data[i-1]); - } - - for (i = 1; i <= N; i++){ - w = input_data[i-1]*1e0; - x = proc(w); - y_local[i-1] = x; - printf("i,x= %d %lf\n", i, y_local[i-1]) ; - } - - sum = 0e0; - for (i = 1; i<= N; i++){ - sum = sum + y_local[i-1]; - } - - printf("sum(y)= %lf\n", sum); -return 0; -} -{{< /highlight >}} -{{% /expand %}} - ---- - -#### Compiling the Code - -The compiled executable needs to match the "standard" environment of the -worker node. The easiest way is to use the compilers installed -on the HCC supercomputer directly, without loading extra modules. The standard -compiler of the HCC supercomputer is the GNU Compiler Collection. The version -can be looked up with the commands `gcc -v` or `gfortran -v`. - - -{{< highlight bash >}} -$ gfortran demo_f_condor.f90 -o demo_f_condor.x -$ gcc demo_c_condor.c -o demo_c_condor.x -{{< /highlight >}} - -#### Creating a Submit Script - -Create a submit script to request 2 jobs (queue). The name of the job -subdirectories is specified in the line `initialdir`. The -`$(process)` macro assigns integer numbers to the job subdirectory -name `job_`. The numbers run from `0` to `queue-1`. The name of the input -data file is specified in the line `transfer_input_files`. - -{{% panel header="`submit_f.condor`"%}} -{{< highlight bash >}} -universe = grid -grid_resource = pbs -batch_queue = guest -should_transfer_files = yes -when_to_transfer_output = on_exit -executable = demo_f_condor.x -output = Fortran_$(process).out -error = Fortran_$(process).err -initialdir = job_$(process) -transfer_input_files = data.dat -queue 2 -{{< /highlight >}} -{{% /panel %}} - -{{% panel header="`submit_c.condor`"%}} -{{< highlight bash >}} -universe = grid -grid_resource = pbs -batch_queue = guest -should_transfer_files = yes -when_to_transfer_output = on_exit -executable = demo_c_condor.x -output = C_$(process).out -error = C_$(process).err -initialdir = job_$(process) -transfer_input_files = data.dat -queue 2 -{{< /highlight >}} -{{% /panel %}} - -#### Submit the Job - -The job can be submitted through the command `condor_submit`. The job -status can be monitored by entering `condor_q` followed by the -username. - -{{< highlight bash >}} -$ condor_submit submit_f.condor -$ condor_submit submit_c.condor -$ condor_q <username> -{{< /highlight >}} - -Replace `<username>` with your HCC username.
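If a submitted job needs to be cancelled before it finishes, `condor_rm` removes it from the queue. A brief sketch, where the cluster ID `1234` is a hypothetical value read off the `condor_q` listing:

{{< highlight bash >}}
# Remove one job cluster by the ID shown in the condor_q output:
$ condor_rm 1234
# Or remove all of your own queued jobs at once:
$ condor_rm <username>
{{< /highlight >}}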
- -Sample Output ------------- - -In the job subdirectory `job_0`, the sum from 1 to 5 is computed and -printed to the `.out` file. In the job subdirectory `job_1`, the sum -from 6 to 10 is computed and printed to the `.out` file. - -{{%expand "Fortran_0.out" %}} -{{< highlight batchfile >}} - i,x = 1 1.0000000000000000 - i,x = 2 2.0000000000000000 - i,x = 3 3.0000000000000000 - i,x = 4 4.0000000000000000 - i,x = 5 5.0000000000000000 - sum(y) = 15.000000000000000 -{{< /highlight >}} -{{% /expand %}} - -{{%expand "Fortran_1.out" %}} -{{< highlight batchfile >}} - i,x = 1 6.0000000000000000 - i,x = 2 7.0000000000000000 - i,x = 3 8.0000000000000000 - i,x = 4 9.0000000000000000 - i,x = 5 10.000000000000000 - sum(y) = 40.000000000000000 -{{< /highlight >}} -{{% /expand %}} diff --git a/content/Submitting_Jobs/submitting_an_openmp_job.md b/content/Submitting_Jobs/submitting_an_openmp_job.md deleted file mode 100644 index 837f2c15..00000000 --- a/content/Submitting_Jobs/submitting_an_openmp_job.md +++ /dev/null @@ -1,42 +0,0 @@ -+++ -title = "Submitting an OpenMP Job" -description = "How to submit an OpenMP job on HCC resources." -+++ - -Submitting an OpenMP job is different from -[Submitting an MPI Job]({{< relref "submitting_an_mpi_job" >}}) -since you must request multiple cores from a single node. - -{{% panel theme="info" header="OpenMP example submission" %}} -{{< highlight batch >}} -#!/bin/sh -#SBATCH --ntasks-per-node=16 # 16 cores -#SBATCH --nodes=1 # 1 node -#SBATCH --mem-per-cpu=1024 # Minimum memory required per CPU (in megabytes) -#SBATCH --time=03:15:00 # Run time in hh:mm:ss -#SBATCH --error=/work/[groupname]/[username]/job.%J.err -#SBATCH --output=/work/[groupname]/[username]/job.%J.out - -export OMP_NUM_THREADS=${SLURM_NTASKS_PER_NODE} -./openmp-app.exe -{{< /highlight >}} -{{% /panel %}} - -Notice that we used `ntasks-per-node` to specify the number of cores we -want on a single node. Additionally, we specify that we only want -1 `node`. - -`OMP_NUM_THREADS` is required to limit the number of cores that OpenMP -will use on the node. It is set to ${SLURM_NTASKS_PER_NODE} to -automatically match the `ntasks-per-node` value (in this example 16). - -### Compiling - -Directions to compile OpenMP applications can be found on -[Compiling an OpenMP Application]({{< relref "/Applications/Using_Your_Own_Software/compiling_an_openmp_application" >}}). - -### Further Documentation - -Further OpenMP documentation can be found on LLNL's -[OpenMP](https://computing.llnl.gov/tutorials/openMP) website. diff --git a/content/accounts/_index.md b/content/accounts/_index.md new file mode 100644 index 00000000..d443ac6e --- /dev/null +++ b/content/accounts/_index.md @@ -0,0 +1,17 @@ ++++ +title = "Creating an Account" +weight = "20" ++++ + +Anyone affiliated with the University of Nebraska system can request an account +and use HCC shared resources for free. + +All HCC accounts are associated with a faculty-owned HCC group. Individuals interested +in requesting an account under an established group will need to complete a [new user request](http://hcc.unl.edu/new-user-request/). + +To establish a new group, please complete a [new group request](https://hcc.unl.edu/new-group-request). + +Additional guides on basic account operations
+-------------------------------------- + +{{% children description="true" %}} diff --git a/content/Accounts/how_to_change_your_password.md b/content/accounts/how_to_change_your_password.md old mode 100755 new mode 100644 similarity index 100% rename from content/Accounts/how_to_change_your_password.md rename to content/accounts/how_to_change_your_password.md diff --git a/content/Accounts/setting_up_and_using_duo.md b/content/accounts/setting_up_and_using_duo.md old mode 100755 new mode 100644 similarity index 100% rename from content/Accounts/setting_up_and_using_duo.md rename to content/accounts/setting_up_and_using_duo.md diff --git a/content/anvil/_index.md b/content/anvil/_index.md new file mode 100644 index 00000000..e69de29b diff --git a/content/Anvil/adding_ssh_key_pairs.md b/content/anvil/adding_ssh_key_pairs.md similarity index 100% rename from content/Anvil/adding_ssh_key_pairs.md rename to content/anvil/adding_ssh_key_pairs.md diff --git a/content/Anvil/anvil_instance_types.md b/content/anvil/anvil_instance_types.md similarity index 100% rename from content/Anvil/anvil_instance_types.md rename to content/anvil/anvil_instance_types.md diff --git a/content/Anvil/available_images.md b/content/anvil/available_images.md similarity index 100% rename from content/Anvil/available_images.md rename to content/anvil/available_images.md diff --git a/content/Anvil/connecting_to_linux_instances_from_mac.md b/content/anvil/connecting_to_linux_instances_from_mac.md similarity index 100% rename from content/Anvil/connecting_to_linux_instances_from_mac.md rename to content/anvil/connecting_to_linux_instances_from_mac.md diff --git a/content/Anvil/connecting_to_linux_instances_from_windows.md b/content/anvil/connecting_to_linux_instances_from_windows.md similarity index 100% rename from content/Anvil/connecting_to_linux_instances_from_windows.md rename to content/anvil/connecting_to_linux_instances_from_windows.md diff --git a/content/Anvil/connecting_to_linux_instances_using_x2go.md b/content/anvil/connecting_to_linux_instances_using_x2go.md similarity index 100% rename from content/Anvil/connecting_to_linux_instances_using_x2go.md rename to content/anvil/connecting_to_linux_instances_using_x2go.md diff --git a/content/Anvil/connecting_to_the_anvil_vpn.md b/content/anvil/connecting_to_the_anvil_vpn.md similarity index 100% rename from content/Anvil/connecting_to_the_anvil_vpn.md rename to content/anvil/connecting_to_the_anvil_vpn.md diff --git a/content/Anvil/connecting_to_windows_instances.md b/content/anvil/connecting_to_windows_instances.md similarity index 100% rename from content/Anvil/connecting_to_windows_instances.md rename to content/anvil/connecting_to_windows_instances.md diff --git a/content/Anvil/creating_an_instance.md b/content/anvil/creating_an_instance.md similarity index 100% rename from content/Anvil/creating_an_instance.md rename to content/anvil/creating_an_instance.md diff --git a/content/Anvil/creating_and_attaching_a_volume.md b/content/anvil/creating_and_attaching_a_volume.md similarity index 100% rename from content/Anvil/creating_and_attaching_a_volume.md rename to content/anvil/creating_and_attaching_a_volume.md diff --git a/content/Anvil/creating_ssh_key_pairs_on_mac.md b/content/anvil/creating_ssh_key_pairs_on_mac.md similarity index 100% rename from content/Anvil/creating_ssh_key_pairs_on_mac.md rename to content/anvil/creating_ssh_key_pairs_on_mac.md diff --git a/content/Anvil/creating_ssh_key_pairs_on_windows.md 
b/content/anvil/creating_ssh_key_pairs_on_windows.md similarity index 100% rename from content/Anvil/creating_ssh_key_pairs_on_windows.md rename to content/anvil/creating_ssh_key_pairs_on_windows.md diff --git a/content/Anvil/formatting_and_mounting_a_volume_in_linux.md b/content/anvil/formatting_and_mounting_a_volume_in_linux.md similarity index 100% rename from content/Anvil/formatting_and_mounting_a_volume_in_linux.md rename to content/anvil/formatting_and_mounting_a_volume_in_linux.md diff --git a/content/Anvil/formatting_and_mounting_a_volume_in_windows.md b/content/anvil/formatting_and_mounting_a_volume_in_windows.md similarity index 100% rename from content/Anvil/formatting_and_mounting_a_volume_in_windows.md rename to content/anvil/formatting_and_mounting_a_volume_in_windows.md diff --git a/content/Anvil/resizing_an_instance.md b/content/anvil/resizing_an_instance.md similarity index 100% rename from content/Anvil/resizing_an_instance.md rename to content/anvil/resizing_an_instance.md diff --git a/content/Anvil/what_are_the_per_group_resource_limits.md b/content/anvil/what_are_the_per_group_resource_limits.md similarity index 100% rename from content/Anvil/what_are_the_per_group_resource_limits.md rename to content/anvil/what_are_the_per_group_resource_limits.md diff --git a/content/Applications/_index.md b/content/applications/_index.md similarity index 83% rename from content/Applications/_index.md rename to content/applications/_index.md index 1caa5c3d..ecaa1702 100644 --- a/content/Applications/_index.md +++ b/content/applications/_index.md @@ -1,5 +1,5 @@ +++ -title = "Applications" +title = "Running Applications" weight = "40" +++ diff --git a/content/applications/app_specific/Jupyter.md b/content/applications/app_specific/Jupyter.md new file mode 100644 index 00000000..e69de29b diff --git a/content/applications/app_specific/_index.md b/content/applications/app_specific/_index.md new file mode 100644 index 00000000..0e4b37a9 --- /dev/null +++ b/content/applications/app_specific/_index.md @@ -0,0 +1,9 @@ ++++ +title = "Application Specific Guides" +weight = "40" ++++ + +In-depth guides for running applications on HCC resources +-------------------------------------- + +{{% children description="true" %}} diff --git a/content/Applications/Application_Specific_Guides/allinea_profiling_and_debugging/_index.md b/content/applications/app_specific/allinea_profiling_and_debugging/_index.md similarity index 100% rename from content/Applications/Application_Specific_Guides/allinea_profiling_and_debugging/_index.md rename to content/applications/app_specific/allinea_profiling_and_debugging/_index.md diff --git a/content/Applications/Application_Specific_Guides/allinea_profiling_and_debugging/allinea_performance_reports/_index.md b/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/_index.md similarity index 100% rename from content/Applications/Application_Specific_Guides/allinea_profiling_and_debugging/allinea_performance_reports/_index.md rename to content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/_index.md diff --git a/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/blast_with_allinea_performance_reports.md b/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/blast_with_allinea_performance_reports.md new file mode 100644 index 00000000..e69de29b diff --git 
a/content/Applications/Application_Specific_Guides/allinea_profiling_and_debugging/allinea_performance_reports/lammps_with_allinea_performance_reports.md b/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/lammps_with_allinea_performance_reports.md similarity index 100% rename from content/Applications/Application_Specific_Guides/allinea_profiling_and_debugging/allinea_performance_reports/lammps_with_allinea_performance_reports.md rename to content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/lammps_with_allinea_performance_reports.md diff --git a/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/ray_with_allinea_performance_reports.md b/content/applications/app_specific/allinea_profiling_and_debugging/allinea_performance_reports/ray_with_allinea_performance_reports.md new file mode 100644 index 00000000..e69de29b diff --git a/content/Applications/Application_Specific_Guides/allinea_profiling_and_debugging/using_allinea_forge_via_reverse_connect.md b/content/applications/app_specific/allinea_profiling_and_debugging/using_allinea_forge_via_reverse_connect.md similarity index 100% rename from content/Applications/Application_Specific_Guides/allinea_profiling_and_debugging/using_allinea_forge_via_reverse_connect.md rename to content/applications/app_specific/allinea_profiling_and_debugging/using_allinea_forge_via_reverse_connect.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/_index.md b/content/applications/app_specific/bioinformatics_tools/_index.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/_index.md rename to content/applications/app_specific/bioinformatics_tools/_index.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/alignment_tools/_index.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/_index.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/alignment_tools/_index.md rename to content/applications/app_specific/bioinformatics_tools/alignment_tools/_index.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/alignment_tools/blast/_index.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/_index.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/alignment_tools/blast/_index.md rename to content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/_index.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/alignment_tools/blast/create_local_blast_database.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/create_local_blast_database.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/alignment_tools/blast/create_local_blast_database.md rename to content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/create_local_blast_database.md diff --git a/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/running_blast_alignment.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/blast/running_blast_alignment.md new file mode 100644 index 00000000..e69de29b diff --git 
a/content/Applications/Application_Specific_Guides/bioinformatics_tools/alignment_tools/blat.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/blat.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/alignment_tools/blat.md rename to content/applications/app_specific/bioinformatics_tools/alignment_tools/blat.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/alignment_tools/bowtie.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/alignment_tools/bowtie.md rename to content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/alignment_tools/bowtie2.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie2.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/alignment_tools/bowtie2.md rename to content/applications/app_specific/bioinformatics_tools/alignment_tools/bowtie2.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/alignment_tools/bwa/_index.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/bwa/_index.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/alignment_tools/bwa/_index.md rename to content/applications/app_specific/bioinformatics_tools/alignment_tools/bwa/_index.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/alignment_tools/bwa/running_bwa_commands.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/bwa/running_bwa_commands.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/alignment_tools/bwa/running_bwa_commands.md rename to content/applications/app_specific/bioinformatics_tools/alignment_tools/bwa/running_bwa_commands.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/alignment_tools/clustal_omega.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/clustal_omega.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/alignment_tools/clustal_omega.md rename to content/applications/app_specific/bioinformatics_tools/alignment_tools/clustal_omega.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/alignment_tools/tophat_tophat2.md b/content/applications/app_specific/bioinformatics_tools/alignment_tools/tophat_tophat2.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/alignment_tools/tophat_tophat2.md rename to content/applications/app_specific/bioinformatics_tools/alignment_tools/tophat_tophat2.md diff --git a/content/applications/app_specific/bioinformatics_tools/biodata_module.md b/content/applications/app_specific/bioinformatics_tools/biodata_module.md new file mode 100644 index 00000000..e69de29b diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/data_manipulation_tools/_index.md b/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/_index.md similarity index 100% rename from 
content/Applications/Application_Specific_Guides/bioinformatics_tools/data_manipulation_tools/_index.md rename to content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/_index.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/data_manipulation_tools/bamtools/_index.md b/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/bamtools/_index.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/data_manipulation_tools/bamtools/_index.md rename to content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/bamtools/_index.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/data_manipulation_tools/bamtools/running_bamtools_commands.md b/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/bamtools/running_bamtools_commands.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/data_manipulation_tools/bamtools/running_bamtools_commands.md rename to content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/bamtools/running_bamtools_commands.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/data_manipulation_tools/samtools/_index.md b/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/samtools/_index.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/data_manipulation_tools/samtools/_index.md rename to content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/samtools/_index.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/data_manipulation_tools/samtools/running_samtools_commands.md b/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/samtools/running_samtools_commands.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/data_manipulation_tools/samtools/running_samtools_commands.md rename to content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/samtools/running_samtools_commands.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/data_manipulation_tools/sratoolkit.md b/content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/sratoolkit.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/data_manipulation_tools/sratoolkit.md rename to content/applications/app_specific/bioinformatics_tools/data_manipulation_tools/sratoolkit.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/de_novo_assembly_tools/_index.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/_index.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/de_novo_assembly_tools/_index.md rename to content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/_index.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/de_novo_assembly_tools/oases.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/oases.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/de_novo_assembly_tools/oases.md rename to 
content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/oases.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/de_novo_assembly_tools/ray.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/ray.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/de_novo_assembly_tools/ray.md rename to content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/ray.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/de_novo_assembly_tools/soapdenovo2.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/soapdenovo2.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/de_novo_assembly_tools/soapdenovo2.md rename to content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/soapdenovo2.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/de_novo_assembly_tools/trinity/_index.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/trinity/_index.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/de_novo_assembly_tools/trinity/_index.md rename to content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/trinity/_index.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/de_novo_assembly_tools/trinity/running_trinity_in_multiple_steps.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/trinity/running_trinity_in_multiple_steps.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/de_novo_assembly_tools/trinity/running_trinity_in_multiple_steps.md rename to content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/trinity/running_trinity_in_multiple_steps.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/de_novo_assembly_tools/velvet/_index.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/_index.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/de_novo_assembly_tools/velvet/_index.md rename to content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/_index.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_paired_end_data.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_paired_end_data.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_paired_end_data.md rename to content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_paired_end_data.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_single_end_and_paired_end_data.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_single_end_and_paired_end_data.md similarity index 100% rename from 
content/Applications/Application_Specific_Guides/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_single_end_and_paired_end_data.md rename to content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_single_end_and_paired_end_data.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_single_end_data.md b/content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_single_end_data.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_single_end_data.md rename to content/applications/app_specific/bioinformatics_tools/de_novo_assembly_tools/velvet/running_velvet_with_single_end_data.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/downloading_sra_data_from_ncbi.md b/content/applications/app_specific/bioinformatics_tools/downloading_sra_data_from_ncbi.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/downloading_sra_data_from_ncbi.md rename to content/applications/app_specific/bioinformatics_tools/downloading_sra_data_from_ncbi.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/pre_processing_tools/_index.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/_index.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/pre_processing_tools/_index.md rename to content/applications/app_specific/bioinformatics_tools/pre_processing_tools/_index.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/pre_processing_tools/cutadapt.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/cutadapt.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/pre_processing_tools/cutadapt.md rename to content/applications/app_specific/bioinformatics_tools/pre_processing_tools/cutadapt.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/pre_processing_tools/prinseq.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/prinseq.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/pre_processing_tools/prinseq.md rename to content/applications/app_specific/bioinformatics_tools/pre_processing_tools/prinseq.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/pre_processing_tools/scythe.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/scythe.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/pre_processing_tools/scythe.md rename to content/applications/app_specific/bioinformatics_tools/pre_processing_tools/scythe.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/pre_processing_tools/sickle.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/sickle.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/pre_processing_tools/sickle.md rename to content/applications/app_specific/bioinformatics_tools/pre_processing_tools/sickle.md diff --git 
a/content/Applications/Application_Specific_Guides/bioinformatics_tools/pre_processing_tools/tagcleaner.md b/content/applications/app_specific/bioinformatics_tools/pre_processing_tools/tagcleaner.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/pre_processing_tools/tagcleaner.md rename to content/applications/app_specific/bioinformatics_tools/pre_processing_tools/tagcleaner.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/qiime.md b/content/applications/app_specific/bioinformatics_tools/qiime.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/qiime.md rename to content/applications/app_specific/bioinformatics_tools/qiime.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/reference_based_assembly_tools/_index.md b/content/applications/app_specific/bioinformatics_tools/reference_based_assembly_tools/_index.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/reference_based_assembly_tools/_index.md rename to content/applications/app_specific/bioinformatics_tools/reference_based_assembly_tools/_index.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/reference_based_assembly_tools/cufflinks.md b/content/applications/app_specific/bioinformatics_tools/reference_based_assembly_tools/cufflinks.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/reference_based_assembly_tools/cufflinks.md rename to content/applications/app_specific/bioinformatics_tools/reference_based_assembly_tools/cufflinks.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/removing_detecting_redundant_sequences/_index.md b/content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/_index.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/removing_detecting_redundant_sequences/_index.md rename to content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/_index.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/removing_detecting_redundant_sequences/cap3.md b/content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/cap3.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/removing_detecting_redundant_sequences/cap3.md rename to content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/cap3.md diff --git a/content/Applications/Application_Specific_Guides/bioinformatics_tools/removing_detecting_redundant_sequences/cd_hit.md b/content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/cd_hit.md similarity index 100% rename from content/Applications/Application_Specific_Guides/bioinformatics_tools/removing_detecting_redundant_sequences/cd_hit.md rename to content/applications/app_specific/bioinformatics_tools/removing_detecting_redundant_sequences/cd_hit.md diff --git a/content/applications/app_specific/dmtcp_checkpointing.md b/content/applications/app_specific/dmtcp_checkpointing.md new file mode 100644 index 00000000..e69de29b diff --git a/content/applications/app_specific/fortran_c_on_hcc.md b/content/applications/app_specific/fortran_c_on_hcc.md new file 
mode 100644 index 00000000..e69de29b diff --git a/content/applications/app_specific/mpi_jobs_on_hcc.md b/content/applications/app_specific/mpi_jobs_on_hcc.md new file mode 100644 index 00000000..e69de29b diff --git a/content/Applications/Application_Specific_Guides/running_gaussian_at_hcc.md b/content/applications/app_specific/running_gaussian_at_hcc.md similarity index 100% rename from content/Applications/Application_Specific_Guides/running_gaussian_at_hcc.md rename to content/applications/app_specific/running_gaussian_at_hcc.md diff --git a/content/Applications/Application_Specific_Guides/running_matlab_parallel_server.md b/content/applications/app_specific/running_matlab_parallel_server.md similarity index 100% rename from content/Applications/Application_Specific_Guides/running_matlab_parallel_server.md rename to content/applications/app_specific/running_matlab_parallel_server.md diff --git a/content/Applications/Application_Specific_Guides/running_ocean_land_atmosphere_model_olam.md b/content/applications/app_specific/running_ocean_land_atmosphere_model_olam.md similarity index 100% rename from content/Applications/Application_Specific_Guides/running_ocean_land_atmosphere_model_olam.md rename to content/applications/app_specific/running_ocean_land_atmosphere_model_olam.md diff --git a/content/Applications/Application_Specific_Guides/running_theano.md b/content/applications/app_specific/running_theano.md similarity index 100% rename from content/Applications/Application_Specific_Guides/running_theano.md rename to content/applications/app_specific/running_theano.md diff --git a/content/Applications/Using_Modules/module_commands.md b/content/applications/modules/_index.md similarity index 95% rename from content/Applications/Using_Modules/module_commands.md rename to content/applications/modules/_index.md index 662fa29c..52826a3b 100644 --- a/content/Applications/Using_Modules/module_commands.md +++ b/content/applications/modules/_index.md @@ -1,8 +1,16 @@ +++ -title = "Module Commands" +title = "Using Preinstalled Software" description = "How to use the module utility on HCC resources." +weight = 10 +++ +HCC offers many popular software packages already installed. Unlike a traditional +laptop or desktop, HCC resources use a module system for managing installed software. Users can load and +use preinstalled software with the `module` command. + +To request additional software installs, please complete a [software installation request](https://hcc.unl.edu/software-installation-request). + `module` commands provide an HPC system user the capability to compile into their source code using any type of library that is available on the server.
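For illustration, here is a minimal sketch of a typical module workflow; the `python/3.6` package name and version are hypothetical examples, so run `module avail` to see what is actually installed on your cluster:

{{< highlight bash >}}
$ module avail            # list the modules available on this cluster
$ module spider python    # search for a package by name
$ module load python/3.6  # load a hypothetical package/version pair
$ module list             # show the modules currently loaded
$ module purge            # unload all loaded modules
{{< /highlight >}}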
The `module` command gives each user the diff --git a/content/Applications/Using_Modules/available_software_for_crane.md b/content/applications/modules/available_software_for_crane.md similarity index 100% rename from content/Applications/Using_Modules/available_software_for_crane.md rename to content/applications/modules/available_software_for_crane.md diff --git a/content/Applications/Using_Modules/available_software_for_rhino.md b/content/applications/modules/available_software_for_rhino.md similarity index 100% rename from content/Applications/Using_Modules/available_software_for_rhino.md rename to content/applications/modules/available_software_for_rhino.md diff --git a/content/applications/user_software/_index.md b/content/applications/user_software/_index.md new file mode 100644 index 00000000..f8b67665 --- /dev/null +++ b/content/applications/user_software/_index.md @@ -0,0 +1,29 @@ ++++ +title = "Using Custom Software" +description = "How to compile source code of various types on HCC resources." +weight = 20 ++++ + +# Compile Code from Source + +Compiling source code on HCC machines is done with compiler environments +configured by the `module` utility. The utility sets the necessary environment +variables and adds the compiler executables to the `PATH` so that users and jobs +can call the compilers directly. + +### Finding Available Compiler Modules + +{{< highlight bash >}} +$ module spider compiler +{{< /highlight >}} + +This command lists the available compiler modules to load. + +### Loading Modules + +{{< highlight bash >}} +$ module load <module> +{{< /highlight >}} + +This command loads an available module. Compiler modules are usually of +the form `compiler/<name>/<version>`. diff --git a/content/Applications/Using_Your_Own_Software/compiling_an_openmp_application.md b/content/applications/user_software/compiling_an_openmp_application.md similarity index 100% rename from content/Applications/Using_Your_Own_Software/compiling_an_openmp_application.md rename to content/applications/user_software/compiling_an_openmp_application.md diff --git a/content/Applications/Using_Your_Own_Software/installing_perl_modules.md b/content/applications/user_software/installing_perl_modules.md similarity index 100% rename from content/Applications/Using_Your_Own_Software/installing_perl_modules.md rename to content/applications/user_software/installing_perl_modules.md diff --git a/content/applications/user_software/using_anaconda_package_manager.md b/content/applications/user_software/using_anaconda_package_manager.md new file mode 100644 index 00000000..e69de29b diff --git a/content/Applications/Using_Your_Own_Software/using_singularity.md b/content/applications/user_software/using_singularity.md similarity index 100% rename from content/Applications/Using_Your_Own_Software/using_singularity.md rename to content/applications/user_software/using_singularity.md diff --git a/content/Connecting/_index.md b/content/connecting/_index.md similarity index 100% rename from content/Connecting/_index.md rename to content/connecting/_index.md diff --git a/content/Connecting/linux/basic_linux_commands.md b/content/connecting/basic_linux_commands.md similarity index 100% rename from content/Connecting/linux/basic_linux_commands.md rename to content/connecting/basic_linux_commands.md diff --git a/content/Quickstarts/connecting/for_maclinux_users.md b/content/connecting/for_maclinux_users.md similarity index 74% rename from content/Quickstarts/connecting/for_maclinux_users.md rename to
content/connecting/for_maclinux_users.md index 67970f61..2e1a2243 100644 --- a/content/Quickstarts/connecting/for_maclinux_users.md +++ b/content/connecting/for_maclinux_users.md @@ -5,7 +5,7 @@ weight = "22" +++ ##### Use of Duo two-factor authentication is **required** to access HCC resources. -##### Please see [Setting up and Using Duo]({{< relref "/Accounts/setting_up_and_using_duo" >}}). +##### Please see [Setting up and Using Duo]({{< relref "setting_up_and_using_duo" >}}). --- - [Access to HCC Supercomputers] (#access-to-hcc-supercomputers) - [File Transferring with HCC Supercomputers] (#file-transfering) @@ -18,20 +18,20 @@ This quick start will help you configure your personal computer to work with the HCC supercomputers. If you are running Windows, please use the quickstart [For Windows -Users]({{< relref "for_windows_users" >}}). +Users]({{< relref "/connecting/for_windows_users" >}}). Access to HCC Supercomputers ------------------------------- For Mac/Linux users, use the system program Terminal to access to the HCC supercomputers. In the Terminal prompt, -type `ssh <username>@crane.unl.edu` and the corresponding password -to get access to the HCC cluster **Crane**. Note that <username> -should be replaced by your HCC account username. If you do not have a +type `ssh <username>@crane.unl.edu` and the corresponding password +to get access to the HCC cluster **Crane**. Note that <username> +should be replaced by your HCC account username. If you do not have an HCC account, please contact a HCC specialist ({{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)) or go to https://hcc.unl.edu/newusers. -To use the **Rhino** cluster, replace crane.unl.edu with with rhino.unl.edu. +To use the **Rhino** cluster, replace crane.unl.edu with rhino.unl.edu. {{< highlight bash >}} $ ssh <username>@crane.unl.edu @@ -44,9 +44,9 @@ File Transferring with HCC Supercomputers ### Using the SCP command For Mac/Linux users, file transferring between your personal computer -and the HCC supercomputers can be achieved through the command `scp`. +and the HCC supercomputers can be achieved through the command `scp`. Here we use **Crane** for example. **The following commands should be -executed from your computer. ** +executed from your computer.** **Uploading from local to remote** @@ -57,7 +57,7 @@ $ scp -r ./<folder name> <username>@crane.unl.edu:/work/<group name>/<username> The above command line transfers a folder from the current directory (`./`) of the your computer to the `$WORK` directory of the HCC supercomputer, Crane. Note that you need to replace `<group name>` -and `<username>` with your HCC group name and username. +and `<username>` with your HCC group name and username. **Downloading from remote to local** @@ -65,15 +65,15 @@ and `<username>` with your HCC group name and username. $ scp -r <username>@crane.unl.edu:/work/<group name>/<username>/<folder name> ./ {{< /highlight >}} -The above command line transfers a folder from the `$WORK` directory of -the HCC supercomputer, Crane, to the current directory (`./`) of the +The above command line transfers a folder from the `$WORK` directory of +the HCC supercomputer, Crane, to the current directory (`./`) of your computer. ### Using Cyberduck --------------- If you wish to use a GUI, be aware that not all programs will function -correctly with Duo two-factor authentication. Mac users are recommended
Mac users are recommended to use [Cyberduck](https://cyberduck.io). It is compatible with Duo, but a few settings need to be changed. @@ -82,7 +82,7 @@ Under **Preferences - General**, change the default protocol to SFTP: {{< figure src="/images/7274497.png" height="450" >}} Under **Preferences - Transfers**, reuse the browser connection for file -transfers. This will avoid the need to reenter your password for every +transfers. This will avoid the need to reenter your password for every file transfer: {{< figure src="/images/7274498.png" height="450" >}} @@ -96,9 +96,9 @@ To add an HCC machine, in the bookmarks pane click the "+" icon: {{< figure src="/images/7274500.png" height="450" >}} -Ensure the type of connection is SFTP. Enter the hostname of the machine +Ensure the type of connection is SFTP. Enter the hostname of the machine you wish to connect to (crane.unl.edu, rhino.unl.edu) in the **Server** -field, and your HCC username in the **Username** field. The +field, and your HCC username in the **Username** field. The **Nickname** field is arbitrary, so enter whatever you prefer. {{< figure src="/images/7274501.png" height="450" >}} @@ -112,15 +112,15 @@ and click *Login*. {{< figure src="/images/7274508.png" height="450" >}} -A second login dialogue will now appear. Notice the text has changed to +A second login dialogue will now appear. Notice the text has changed to say Duo two-factor. {{< figure src="/images/7274510.png" height="450" >}} -Clear the **Password** field in the dialogue. If you are using the Duo +Clear the **Password** field in the dialogue. If you are using the Duo Mobile app, enter '1' to have a push notification send to your phone or -tablet. If you are using a Yubikey, ensure the cursor is active in the -**Password** field, and press the button on the Yubikey. +tablet. If you are using a Yubikey, ensure the cursor is active in the +**Password** field, and press the button on the Yubikey. {{< figure src="/images/7274509.png" height="450" >}} diff --git a/content/Quickstarts/connecting/for_windows_users.md b/content/connecting/for_windows_users.md similarity index 72% rename from content/Quickstarts/connecting/for_windows_users.md rename to content/connecting/for_windows_users.md index 343206cc..9d915ca4 100644 --- a/content/Quickstarts/connecting/for_windows_users.md +++ b/content/connecting/for_windows_users.md @@ -5,7 +5,7 @@ weight = "20" +++ ##### Use of Duo two-factor authentication is **required** to access HCC resources. -##### Please see [Setting up and Using Duo]({{< relref "/Accounts/setting_up_and_using_duo" >}}). +##### Please see [Setting up and Using Duo]({{< relref "setting_up_and_using_duo" >}}). --- @@ -22,7 +22,7 @@ Access to HCC Supercomputers {{% notice info %}} If you are on a Mac, please use the quickstart for [For Mac/Linux -Users]({{< relref "for_maclinux_users" >}}). +Users]({{< relref "/connecting/for_maclinux_users" >}}). {{% /notice %}} @@ -30,16 +30,16 @@ Users]({{< relref "for_maclinux_users" >}}). -------------- For Windows 10 users, use the Command Prompt, accessed by entering `cmd` in the start menu, to access to the HCC supercomputers. In the Command Prompt, -type `ssh <username>@crane.unl.edu` and the corresponding password -to get access to the HCC cluster **Crane**. Note that <username> -should be replaced by your HCC account username. If you do not have a +type `ssh <username>@tusker.unl.edu` and the corresponding password +to get access to the HCC cluster **Tusker**. 
Note that <username> +should be replaced by your HCC account username. If you do not have an HCC account, please contact a HCC specialist ({{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)) or go to http://hcc.unl.edu/newusers. - +To use the **Crane** cluster, replace tusker.unl.edu with crane.unl.edu. {{< highlight bash >}} -C:\> ssh <username>@crane.unl.edu +C:\> ssh <username>@tusker.unl.edu C:\> <password> {{< /highlight >}} @@ -49,21 +49,21 @@ C:\> <password> -------------- This quick start will help you configure your personal computer to work with the HCC supercomputers. Here we use the two third party application -**PuTTY** and **WinSCP** for demonstration. +**PuTTY** and **WinSCP** for demonstration. PuTTY: https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html -or [Direct Link](https://the.earth.li/~sgtatham/putty/latest/w32/putty.exe) +or [Direct Link](https://the.earth.li/~sgtatham/putty/latest/w32/putty.exe) Here we use the HCC cluster **Tusker** for demonstration. To use the -**Crane** cluster, replace `tusker.unl.edu` with `crane.unl.edu`. +**Crane** cluster, replace `tusker.unl.edu` with `crane.unl.edu`. -1. On the first screen, type `tusker.unl.edu` for Host Name, then click - **Open**. +1. On the first screen, type `tusker.unl.edu` for Host Name, then click + **Open**. {{< figure src="/images/3178523.png" height="450" >}} -2. On the second screen, click on **Yes**. +2. On the second screen, click on **Yes**. {{< figure src="/images/3178524.png" height="300" >}} -3. On the third screen, enter your HCC account **username**. If you do +3. On the third screen, enter your HCC account **username**. If you do not have a HCC account, please contact an HCC specialist ({{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)) or go to http://hcc.unl.edu/newusers. @@ -73,9 +73,9 @@ Here we use the HCC cluster **Tusker** for demonstration. To use the {{< figure src="/images/8127261.png" height="450" >}} -4. On the next screen, enter your HCC account **password**. +4. On the next screen, enter your HCC account **password**. - {{% notice info %}}**Note that PuTTY will not show the characters as you type for security reasons.**{{% /notice %}} + {{% notice info %}}**Note that PuTTY will not show the characters as you type for security reasons.**{{% /notice %}} {{< figure src="/images/8127262.png" height="450" >}} @@ -85,7 +85,7 @@ Here we use the HCC cluster **Tusker** for demonstration. To use the 6. If you have a Yubikey set up by HCC, please hold the Yubikey for \~1 second. Then you will be brought to your home directory similar as - below. + below. {{< figure src="/images/8127266.png" height="450" >}} @@ -105,21 +105,21 @@ Here we use the HCC cluster **Tusker** for demonstration. To use the home directory similar as below. {{< figure src="/images/8127264.png" height="450" >}} - + File Transferring with HCC Supercomputers ----------------------------------------- {{% notice info%}} -For best results when transfering data to and from the clusters, refer to [Data Transfer]({{< ref "/Data_Transfer" >}}) +For best results when transferring data to and from the clusters, refer to [Handling Data]({{< ref "/handling_data" >}}) {{%/notice%}} ### SCP For Windows users, file transferring between your personal computer -and the HCC supercomputers can be achieved through the command `scp`. +and the HCC supercomputers can be achieved through the command `scp`. Here we use **Tusker** for example.
**The following commands should be -executed from your computer. ** +executed from your computer.** **Uploading from local to remote** @@ -130,7 +130,7 @@ C:\> scp -r .\<folder name> <username>@tusker.unl.edu:/work/<group name>/<userna The above command line transfers a folder from the current directory (`.\`) of the your computer to the `$WORK` directory of the HCC supercomputer, Tusker. Note that you need to replace `<group name>` -and `<username>` with your HCC group name and username. +and `<username>` with your HCC group name and username. **Downloading from remote to local** @@ -138,8 +138,8 @@ and `<username>` with your HCC group name and username. C:\> scp -r <username>@tusker.unl.edu:/work/<group name>/<username>/<folder name> .\ {{< /highlight >}} -The above command line transfers a folder from the `$WORK` directory of -the HCC supercomputer, Tusker, to the current directory (`.\`) of the +The above command line transfers a folder from the `$WORK` directory of +the HCC supercomputer, Tusker, to the current directory (`.\`) of your computer. @@ -149,19 +149,19 @@ WinSCP: http://winscp.net/eng/download.php Usually it is convenient to upload and download files between your personal computer and the HCC supercomputers through a Graphic User Interface (GUI). -Download and install the third party application **WinSCP** +Download and install the third party application **WinSCP** to connect the file systems between your personal computer and the HCC supercomputers. -Below is a step-by-step installation guide. Here we use the HCC cluster **Tusker** -for demonstration. To use the **Crane** cluster, replace `tusker.unl.edu` -with `crane.unl.edu`. +Below is a step-by-step installation guide. Here we use the HCC cluster **Tusker** +for demonstration. To use the **Crane** cluster, replace `tusker.unl.edu` +with `crane.unl.edu`. -1. On the first screen, type `tusker.unl.edu` for Host name, enter your +1. On the first screen, type `tusker.unl.edu` for Host name, enter your HCC account username and password for User name and Password. Then - click on **Login**. + click on **Login**. {{< figure src="/images/3178530.png" height="450" >}} -2. On the second screen, click on **Yes**. +2. On the second screen, click on **Yes**. {{< figure src="/images/3178531.png" >}} @@ -170,20 +170,20 @@ with `crane.unl.edu`. {{< figure src="/images/8127268.png" >}} -4. On the third screen, click on **Remote**. Under Remote, choose Go To +4. On the third screen, click on **Remote**. Under Remote, choose Go To and Open Directory/Bookmark. Alternatively, you can use the keyboard shortcut "Ctrl + O". {{< figure src="/images/3178532.png" height="450" >}} -5. On the final screen, type `/work/<group name>/<username>` for Open +5. On the final screen, type `/work/<group name>/<username>` for Open directory. Use your HCC group name and username to replace - `<group name>` and `<username>`. Then click on **OK**. + `<group name>` and `<username>`. Then click on **OK**. {{< figure src="/images/3178533.png" height="450" >}} 6. Now you can drop and drag the files between your personal computer - and the HCC supercomputers. + and the HCC supercomputers.
{{< figure src="/images/3178539.png" height="450" >}} Tutorial Video @@ -192,3 +192,4 @@ Tutorial Video {{< youtube -Vh7SyC-3mA >}} + diff --git a/content/Connecting/how_to_setup_x11_forwarding.md b/content/connecting/how_to_setup_x11_forwarding.md similarity index 100% rename from content/Connecting/how_to_setup_x11_forwarding.md rename to content/connecting/how_to_setup_x11_forwarding.md diff --git a/content/Quickstarts/connecting/mobaxterm_windows.md b/content/connecting/mobaxterm_windows.md similarity index 85% rename from content/Quickstarts/connecting/mobaxterm_windows.md rename to content/connecting/mobaxterm_windows.md index 6a7c000a..c3c9cac5 100644 --- a/content/Quickstarts/connecting/mobaxterm_windows.md +++ b/content/connecting/mobaxterm_windows.md @@ -5,7 +5,7 @@ weight = "22" +++ ##### Use of Duo two-factor authentication is **required** to access HCC resources. -##### Please see [Setting up and Using Duo]({{< relref "/Accounts/setting_up_and_using_duo" >}}). +##### Please see [Setting up and Using Duo]({{< relref "setting_up_and_using_duo" >}}). --- This quick start will help you configure your MobaXterm installation to work with HCC resources. @@ -16,12 +16,12 @@ Access to HCC Supercomputers using MobaXterm To connect to HCC resources using MobaXterm, open the application and select the Session Icon. {{< figure src="/images/moba/main.png" height="450" >}} -Select SSH as the session type. Enter the cluster you are connecting to, in the example, `crane.unl.edu`, is used. Check `Specify username` and enter your HCC username in the the box. Note that <username> -should be replaced by your HCC account username. If you do not have a +Select SSH as the session type. Enter the cluster you are connecting to, in the example, `crane.unl.edu`, is used. Check `Specify username` and enter your HCC username in the the box. Note that <username> +should be replaced by your HCC account username. If you do not have a HCC account, please contact a HCC specialist ({{< icon name="envelope" >}}[hcc-support@unl.edu] (mailto:hcc-support@unl.edu)) or go to https://hcc.unl.edu/newusers. -To use the **Rhino** cluster, replace crane.unl.edu with with rhino.unl.edu. +To use the **Rhino** cluster, replace crane.unl.edu with with rhino.unl.edu. {{< figure src="/images/moba/session.png" height="450" >}} Select OK. You will be asked to enter your password and to authenticate with duo. @@ -41,14 +41,14 @@ MobaXterm allows file transfering in a 'drag and drop' style, similar to WinSCP. The above example transfers a folder from a local directory of the your computer to the `$HOME` directory of the HCC supercomputer, Crane. Note that you need to replace `<group name>` -and `<username>` with your HCC group name and username. +and `<username>` with your HCC group name and username. {{< figure src="/images/moba/upload.png" height="450" >}} **Downloading from remote to local** -The above example transfers a folder from the `$HOME` directory of -the HCC supercomputer, Crane, to a local directory on +The above example transfers a folder from the `$HOME` directory of +the HCC supercomputer, Crane, to a local directory on your computer. {{< figure src="/images/moba/download.png" height="450" >}} **Editing remote files** @@ -77,3 +77,4 @@ After the key is generated, go to the `Conversions` tab and select `Export OpenS {{< figure src="/images/moba/exportssh.png" height="450" >}} **Treat the private key file the same as you would a password. 
Keep your private key in a secure location and do not share it with anyone.** + diff --git a/content/Connecting/reusing_ssh_connections_in_linux_or_mac.md b/content/connecting/reusing_ssh_connections_in_linux_or_mac.md similarity index 100% rename from content/Connecting/reusing_ssh_connections_in_linux_or_mac.md rename to content/connecting/reusing_ssh_connections_in_linux_or_mac.md diff --git a/content/Contact_Us/_index.md b/content/contact_us/_index.md similarity index 100% rename from content/Contact_Us/_index.md rename to content/contact_us/_index.md diff --git a/content/Data_Storage/data_storage_overview.md b/content/handling_data/_index.md similarity index 65% rename from content/Data_Storage/data_storage_overview.md rename to content/handling_data/_index.md index c841e4f0..1b648b41 100644 --- a/content/Data_Storage/data_storage_overview.md +++ b/content/handling_data/_index.md @@ -1,14 +1,14 @@ +++ -title = "Data Storage Overview" -description = "Overview of Data Storage on HCC resources." -weight = "10" +title = "Handling Data" +description = "How to work with and transfer data to/from HCC resources." +weight = "30" +++ {{% panel theme="danger" header="**Sensitive and Protected Data**" %}}HCC currently has *no storage* that is suitable for **HIPAA** or other **PID** data sets. Users are not permitted to store such data on HCC machines.{{% /panel %}} All HCC machines have three separate areas for every user to store data, -each intended for a different purpose. In addition, we have a transfer -service that utilizes [Globus Connect]({{< relref "/Data_Transfer/globus_connect" >}}). +each intended for a different purpose. In addition, we have a transfer +service that utilizes [Globus Connect]({{< relref "data_transfer/globus_connect/" >}}). {{< figure src="/images/35325560.png" height="500" class="img-border">}} --- @@ -20,11 +20,11 @@ variable (i.e. '`cd $HOME'`). {{% /notice %}} Your home directory (i.e. `/home/[group]/[username]`) is meant for items -that take up relatively small amounts of space. For example: source -code, program binaries, configuration files, etc. This space is -quota-limited to **20GB per user**. The home directories are backed up -for the purposes of best-effort disaster recovery. This space is not -intended as an area for I/O to active jobs. **/home** is mounted +that take up relatively small amounts of space. For example: source +code, program binaries, configuration files, etc. This space is +quota-limited to **20GB per user**. The home directories are backed up +for the purposes of best-effort disaster recovery. This space is not +intended as an area for I/O to active jobs. **/home** is mounted **read-only** on cluster worker nodes to enforce this policy. --- @@ -67,12 +67,12 @@ variable (i.e. '`cd $WORK'`). {{% panel theme="danger" header="**File Loss**" %}}The `/work` directories are **not backed up**. Irreparable data loss is possible with a mis-typed command. See [Preventing File Loss]({{< relref "preventing_file_loss" >}}) for strategies to avoid this.{{% /panel %}} Every user has a corresponding directory under /work using the same -naming convention as `/home` (i.e. `/work/[group]/[username]`). We -encourage all users to use this space for I/O to running jobs. This +naming convention as `/home` (i.e. `/work/[group]/[username]`). We +encourage all users to use this space for I/O to running jobs. This directory can also be used when larger amounts of space are temporarily -needed. 
There is a **50TB per group quota**; space in /work is shared -among all users. It should be treated as short-term scratch space, and -**is not backed up**. **Please use the `hcc-du` command to check your +needed. There is a **50TB per group quota**; space in /work is shared +among all users. It should be treated as short-term scratch space, and +**is not backed up**. **Please use the `hcc-du` command to check your own and your group's usage, and back up and clean up your files at reasonable intervals in $WORK.** @@ -80,17 +80,17 @@ reasonable intervals in $WORK.** ### Purge Policy HCC has a **purge policy on /work** for files that become dormant. - After **6 months of inactivity on a file (26 weeks)**, an automated -purge process will reclaim the used space of these dormant files. HCC + After **6 months of inactivity on a file (26 weeks)**, an automated +purge process will reclaim the used space of these dormant files. HCC provides the **`hcc-purge`** utility to list both the summary and the actual file paths of files that have been dormant for **24 weeks**. - This list is periodically generated; the timestamp of the last search + This list is periodically generated; the timestamp of the last search is included in the default summary output when calling `hcc-purge` with -no arguments. No output from `hcc-purge` indicates the last scan did -not find any dormant files. `hcc-purge -l` will use the less pager to -list the matching files for the user. The candidate list can also be +no arguments. No output from `hcc-purge` indicates the last scan did +not find any dormant files. `hcc-purge -l` will use the less pager to +list the matching files for the user. The candidate list can also be accessed at the following path:` /lustre/purge/current/${USER}.list`. - This list is updated twice a week, on Mondays and Thursdays. + This list is updated twice a week, on Mondays and Thursdays. {{% notice warning %}} `/work` is intended for recent job output and not long term storage. Evidence of circumventing the purge policy by users will result in consequences including account lockout. @@ -98,33 +98,33 @@ accessed at the following path:` /lustre/purge/current/${USER}.list`. If you have space requirements outside what is currently provided, please -email <a href="mailto:hcc-support@unl.edu" class="external-link">hcc-support@unl.edu</a> and +email <a href="mailto:hcc-support@unl.edu" class="external-link">hcc-support@unl.edu</a> and we will gladly discuss alternatives. --- ### [Attic]({{< relref "using_attic" >}}) -Attic is a near line archive available for purchase at HCC. Attic -provides reliable large data storage that is designed to be more -reliable then `/work`, and larger than `/home`. Access to Attic is done -through [Globus Connect]({{< relref "/Data_Transfer/globus_connect" >}}). +Attic is a near line archive available for purchase at HCC. Attic +provides reliable large data storage that is designed to be more +reliable then `/work`, and larger than `/home`. Access to Attic is done +through [Globus Connect]({{< relref "data_transfer/globus_connect/" >}}). More details on Attic can be found on HCC's <a href="https://hcc.unl.edu/attic" class="external-link">Attic</a> website. --- -### [Globus Connect]({{< relref "/Data_Transfer/globus_connect" >}}) +### [Globus Connect]({{< relref "data_transfer/globus_connect/" >}}) For moving large amounts of data into or out of HCC resources, users are -highly encouraged to consider using [Globus -Connect]({{< relref "/Data_Transfer/globus_connect" >}}). 
+highly encouraged to consider using [Globus +Connect]({{< relref "data_transfer/globus_connect/" >}}). --- ### Using Box You can use your [UNL -Box.com]({{< relref "integrating_box_with_hcc" >}}) account to download and +Box.com]({{< relref "integrating_box_with_hcc" >}}) account to download and upload files from any of the HCC clusters. diff --git a/content/handling_data/data_storage/_index.md b/content/handling_data/data_storage/_index.md new file mode 100644 index 00000000..33f75fdd --- /dev/null +++ b/content/handling_data/data_storage/_index.md @@ -0,0 +1,129 @@ ++++ +title = "Data Storage" +description = "How to work with and transfer data to/from HCC resources." +weight = "30" ++++ + +{{% panel theme="danger" header="**Sensitive and Protected Data**" %}}HCC currently has *no storage* that is suitable for **HIPAA** or other **PID** data sets. Users are not permitted to store such data on HCC machines.{{% /panel %}} + +All HCC machines have three separate areas for every user to store data, +each intended for a different purpose. In addition, we have a transfer +service that utilizes [Globus Connect]({{< relref "../data_transfer/globus_connect" >}}). +{{< figure src="/images/35325560.png" >}} + +--- +### Home Directory + +{{% notice info %}} +You can access your home directory quickly using the $HOME environmental +variable (i.e. '`cd $HOME`'). +{{% /notice %}} + +Your home directory (i.e. `/home/[group]/[username]`) is meant for items +that take up relatively small amounts of space. For example: source +code, program binaries, configuration files, etc. This space is +quota-limited to **20GB per user**. The home directories are backed up +for the purposes of best-effort disaster recovery. This space is not +intended as an area for I/O to active jobs. **/home** is mounted +**read-only** on cluster worker nodes to enforce this policy. + +--- +### Common Directory + +{{% notice info %}} +You can access your common directory quickly using the $COMMON +environmental variable (i.e. '`cd $COMMON`') +{{% /notice %}} + +The common directory operates similarly to work and is mounted with +**read and write capability to worker nodes on all HCC Clusters**. This +means that any files stored in common can be accessed from Crane and Tusker, making this directory ideal for items that need to be +accessed from multiple clusters such as reference databases and shared +data files. + +{{% notice warning %}} +Common is not designed for heavy I/O usage. Please continue to use your +work directory for active job output to ensure the best performance of +your jobs. +{{% /notice %}} + +Quotas for common are **30 TB per group**, with larger quotas available +for purchase if needed. However, files stored here will **not be backed +up** and are **not subject to purge** at this time. Please continue to +back up your files to prevent irreparable data loss. + +Additional information on using the common directories can be found in +the documentation on [Using the /common File System]({{< relref "using_the_common_file_system" >}}). + +--- +### High Performance Work Directory + +{{% notice info %}} +You can access your work directory quickly using the $WORK environmental +variable (i.e. '`cd $WORK`'). +{{% /notice %}} + +{{% panel theme="danger" header="**File Loss**" %}}The `/work` directories are **not backed up**. Irreparable data loss is possible with a mis-typed command.
See [Preventing File Loss]({{< relref "preventing_file_loss" >}}) for strategies to avoid this.{{% /panel %}} + +Every user has a corresponding directory under /work using the same +naming convention as `/home` (i.e. `/work/[group]/[username]`). We +encourage all users to use this space for I/O to running jobs. This +directory can also be used when larger amounts of space are temporarily +needed. There is a **50TB per group quota**; space in /work is shared +among all users. It should be treated as short-term scratch space, and +**is not backed up**. **Please use the `hcc-du` command to check your +own and your group's usage, and back up and clean up your files at +reasonable intervals in $WORK.** + +--- +### Purge Policy + +HCC has a **purge policy on /work** for files that become dormant. + After **6 months of inactivity on a file (26 weeks)**, an automated +purge process will reclaim the used space of these dormant files. HCC +provides the **`hcc-purge`** utility to list both the summary and the +actual file paths of files that have been dormant for **24 weeks**. + This list is periodically generated; the timestamp of the last search +is included in the default summary output when calling `hcc-purge` with +no arguments. No output from `hcc-purge` indicates the last scan did +not find any dormant files. `hcc-purge -l` will use the less pager to +list the matching files for the user. The candidate list can also be +accessed at the following path: `/lustre/purge/current/${USER}.list`. + This list is updated twice a week, on Mondays and Thursdays. + +{{% notice warning %}} +`/work` is intended for recent job output and not long term storage. Evidence of circumventing the purge policy by users will result in consequences including account lockout. +{{% /notice %}} + +If you have space requirements outside what is currently provided, +please +email <a href="mailto:hcc-support@unl.edu" class="external-link">hcc-support@unl.edu</a> and +we will gladly discuss alternatives. + +--- +### [Attic]({{< relref "using_attic" >}}) + +Attic is a near line archive available for purchase at HCC. Attic +provides reliable large data storage that is designed to be more +reliable than `/work`, and larger than `/home`. Access to Attic is done +through [Globus Connect]({{< relref "../data_transfer/globus_connect" >}}). + +More details on Attic can be found on HCC's +<a href="https://hcc.unl.edu/attic" class="external-link">Attic</a> +website. + +--- +### [Globus Connect]({{< relref "../data_transfer/globus_connect" >}}) + +For moving large amounts of data into or out of HCC resources, users are +highly encouraged to consider using [Globus +Connect]({{< relref "../data_transfer/globus_connect" >}}). + +--- +### Using Box + +You can use your [UNL +Box.com]({{< relref "integrating_box_with_hcc" >}}) account to download and +upload files from any of the HCC clusters.
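To tie the storage areas above together, here is a minimal sketch of a session that checks usage with the utilities described on this page; the environment variables and the `hcc-du`/`hcc-purge` commands are the ones documented above, and the order of the steps is just an illustration:

{{< highlight bash >}}
$ cd $WORK       # high-performance scratch space for I/O to running jobs
$ hcc-du         # check your own and your group's usage against the quota
$ hcc-purge      # summary of dormant files that are purge candidates
$ hcc-purge -l   # page through the candidate file list with the less pager
$ cd $COMMON     # cross-cluster space for shared reference data
$ cd $HOME       # small, backed-up space (mounted read-only on worker nodes)
{{< /highlight >}}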
+ + diff --git a/content/Data_Storage/data_for_unmc_users_only.md b/content/handling_data/data_storage/data_for_unmc_users_only.md similarity index 100% rename from content/Data_Storage/data_for_unmc_users_only.md rename to content/handling_data/data_storage/data_for_unmc_users_only.md diff --git a/content/Data_Storage/integrating_box_with_hcc.md b/content/handling_data/data_storage/integrating_box_with_hcc.md similarity index 100% rename from content/Data_Storage/integrating_box_with_hcc.md rename to content/handling_data/data_storage/integrating_box_with_hcc.md diff --git a/content/handling_data/data_storage/linux_file_permissions.md b/content/handling_data/data_storage/linux_file_permissions.md new file mode 100644 index 00000000..e69de29b diff --git a/content/handling_data/data_storage/preventing_file_loss.md b/content/handling_data/data_storage/preventing_file_loss.md new file mode 100644 index 00000000..e69de29b diff --git a/content/handling_data/data_storage/using_attic.md b/content/handling_data/data_storage/using_attic.md new file mode 100644 index 00000000..e69de29b diff --git a/content/Data_Storage/using_nus_gitlab_instance/_index.md b/content/handling_data/data_storage/using_nus_gitlab_instance/_index.md similarity index 100% rename from content/Data_Storage/using_nus_gitlab_instance/_index.md rename to content/handling_data/data_storage/using_nus_gitlab_instance/_index.md diff --git a/content/Data_Storage/using_nus_gitlab_instance/setting_up_gitlab_on_hcc_clusters.md b/content/handling_data/data_storage/using_nus_gitlab_instance/setting_up_gitlab_on_hcc_clusters.md similarity index 100% rename from content/Data_Storage/using_nus_gitlab_instance/setting_up_gitlab_on_hcc_clusters.md rename to content/handling_data/data_storage/using_nus_gitlab_instance/setting_up_gitlab_on_hcc_clusters.md diff --git a/content/Data_Storage/using_the_common_file_system.md b/content/handling_data/data_storage/using_the_common_file_system.md similarity index 100% rename from content/Data_Storage/using_the_common_file_system.md rename to content/handling_data/data_storage/using_the_common_file_system.md diff --git a/content/handling_data/data_transfer/_index.md b/content/handling_data/data_transfer/_index.md new file mode 100644 index 00000000..3717077b --- /dev/null +++ b/content/handling_data/data_transfer/_index.md @@ -0,0 +1,20 @@ ++++ +title = "Data Transfer" +description = "How to transfer data to/from HCC resources." +weight = "30" ++++ + +### [Globus Connect]({{< relref "../data_transfer/globus_connect/" >}}) + +For moving large amounts of data into or out of HCC resources, users are +highly encouraged to consider using [Globus +Connect]({{< relref "../data_transfer/globus_connect/" >}}). + +--- +### Using Box + +You can use your [UNL +Box.com]({{< relref "integrating_box_with_hcc" >}}) account to download and +upload files from any of the HCC clusters. 
+
+
diff --git a/content/Data_Transfer/connect_to_cb3_irods.md b/content/handling_data/data_transfer/connect_to_cb3_irods.md
similarity index 100%
rename from content/Data_Transfer/connect_to_cb3_irods.md
rename to content/handling_data/data_transfer/connect_to_cb3_irods.md
diff --git a/content/Data_Transfer/globus_connect/_index.md b/content/handling_data/data_transfer/globus_connect/_index.md
similarity index 100%
rename from content/Data_Transfer/globus_connect/_index.md
rename to content/handling_data/data_transfer/globus_connect/_index.md
diff --git a/content/handling_data/data_transfer/globus_connect/activating_hcc_cluster_endpoints.md b/content/handling_data/data_transfer/globus_connect/activating_hcc_cluster_endpoints.md
new file mode 100644
index 00000000..e69de29b
diff --git a/content/Data_Transfer/globus_connect/creating_globus_groups.md b/content/handling_data/data_transfer/globus_connect/creating_globus_groups.md
similarity index 100%
rename from content/Data_Transfer/globus_connect/creating_globus_groups.md
rename to content/handling_data/data_transfer/globus_connect/creating_globus_groups.md
diff --git a/content/Data_Transfer/globus_connect/file_sharing.md b/content/handling_data/data_transfer/globus_connect/file_sharing.md
similarity index 100%
rename from content/Data_Transfer/globus_connect/file_sharing.md
rename to content/handling_data/data_transfer/globus_connect/file_sharing.md
diff --git a/content/handling_data/data_transfer/globus_connect/file_transfers_between_endpoints.md b/content/handling_data/data_transfer/globus_connect/file_transfers_between_endpoints.md
new file mode 100644
index 00000000..e69de29b
diff --git a/content/Data_Transfer/globus_connect/file_transfers_to_and_from_personal_workstations.md b/content/handling_data/data_transfer/globus_connect/file_transfers_to_and_from_personal_workstations.md
similarity index 100%
rename from content/Data_Transfer/globus_connect/file_transfers_to_and_from_personal_workstations.md
rename to content/handling_data/data_transfer/globus_connect/file_transfers_to_and_from_personal_workstations.md
diff --git a/content/Data_Transfer/globus_connect/globus_command_line_interface.md b/content/handling_data/data_transfer/globus_connect/globus_command_line_interface.md
similarity index 100%
rename from content/Data_Transfer/globus_connect/globus_command_line_interface.md
rename to content/handling_data/data_transfer/globus_connect/globus_command_line_interface.md
diff --git a/content/handling_data/data_transfer/high_speed_data_transfers.md b/content/handling_data/data_transfer/high_speed_data_transfers.md
new file mode 100644
index 00000000..e69de29b
diff --git a/content/Data_Transfer/using_rclone_with_hcc.md b/content/handling_data/data_transfer/using_rclone_with_hcc.md
old mode 100755
new mode 100644
similarity index 100%
rename from content/Data_Transfer/using_rclone_with_hcc.md
rename to content/handling_data/data_transfer/using_rclone_with_hcc.md
diff --git a/content/intro/_index.md b/content/intro/_index.md
new file mode 100644
index 00000000..bab84a39
--- /dev/null
+++ b/content/intro/_index.md
@@ -0,0 +1,42 @@
++++
+title = "Introduction to HPC"
+description = "What is a cluster and what is HPC"
+weight = "10"
++++
+
+## What is HPC
+High-Performance Computing (HPC) is the use of groups of computers to perform
+computations that a single user's desktop or laptop could not complete in a
+reasonable time frame. This is often achieved by splitting one large job
+among numerous cores, or 'workers'.
+This is similar to how a skyscraper is built
+by numerous individuals rather than a single person. Many fields take advantage of
+HPC, including bioinformatics, chemistry, materials engineering, and newer fields
+such as educational psychology and philosophy.
+
+{{< figure src="/images/cluster.png" height="450" >}}
+
+HPC clusters consist of four primary parts: the login node, management node, worker nodes,
+and a central storage array. All of these parts are bound together with a scheduler
+such as HTCondor or SLURM.
+
+#### Login Node:
+Users will automatically land on the login node when they log in to the clusters.
+You will [submit jobs]({{< ref "/submitting_jobs" >}}) using one of the schedulers
+and retrieve the results of your jobs. Any jobs found running directly on the login
+node will be stopped so that the login node stays available for others to submit jobs.
+
+#### Management Node:
+The management node does as its name suggests: it manages the cluster and provides a
+central point for administering the rest of the systems.
+
+#### Worker Nodes:
+The worker nodes run and process the jobs submitted through the schedulers.
+The schedulers get more work done efficiently by packing as many jobs as possible
+onto the nodes, based on the resources each job requests. They also provide fair-use
+computing by making sure no single user or group occupies the entire cluster at once,
+allowing others to use the clusters.
+
+#### Central Storage Array:
+The central storage array gives all of the nodes within the cluster access to
+the same files without needing to transfer them around. HCC has three arrays mounted on
+the clusters, with more details [here]({{< ref "/handling_data" >}}).
diff --git a/content/Quickstarts/submitting_jobs.md b/content/submitting_jobs/_index.md
similarity index 79%
rename from content/Quickstarts/submitting_jobs.md
rename to content/submitting_jobs/_index.md
index 60f5f28c..08a19416 100644
--- a/content/Quickstarts/submitting_jobs.md
+++ b/content/submitting_jobs/_index.md
@@ -1,17 +1,18 @@
 +++
 title = "Submitting Jobs"
 description = "How to submit jobs to HCC resources"
-weight = "10"
+weight = "50"
 +++
 
 Crane and Rhino are managed by
-the [SLURM](https://slurm.schedmd.com) resource manager.
+the [SLURM](https://slurm.schedmd.com) resource manager.
 In order to run processing on Crane, you must create a SLURM script that will
 run your processing. After submitting the job, SLURM will schedule your
 processing on an available worker node.
 
-Before writing a submit file, you may need to compile your application.
+Before writing a submit file, you may need to
+[compile your application]({{< relref "/applications/user_software" >}}).
 
 - [Ensure proper working directory for job output](#ensure-proper-working-directory-for-job-output)
 - [Creating a SLURM Submit File](#creating-a-slurm-submit-file)
@@ -52,7 +53,7 @@
 look at the [MPI Submission Guide.]({{< relref "submitting_an_mpi_job" >}})
 {{% /notice %}}
 
 A SLURM submit file is broken into 2 sections, the job description and
-the processing. SLURM job description are prepended with `#SBATCH` in
+the processing. SLURM job description lines are prepended with `#SBATCH` in
 the submit file.
 
 **SLURM Submit File**
 
@@ -72,10 +73,10 @@
 sleep 60
 {{< /highlight >}}
 
 - **time**
-  Maximum walltime the job can run. After this time has expired, the
+  Maximum walltime the job can run. After this time has expired, the
   job will be stopped.
 - **mem-per-cpu**
-  Memory that is allocated per core for the job. If you exceed this
+  Memory that is allocated per core for the job. If you exceed this
   memory limit, your job will be stopped.
 - **mem**
   Specify the real memory required per node in MegaBytes. If you
@@ -83,25 +84,25 @@ sleep 60
   should ask for less memory than each node actually has. For Crane,
   the max is 500GB.
 - **job-name**
-  The name of the job. Will be reported in the job listing.
+  The name of the job. Will be reported in the job listing.
 - **partition**
-  The partition the job should run in. Partitions determine the job's
-  priority and on what nodes the partition can run on. See the
+  The partition the job should run in. Partitions determine the job's
+  priority and the nodes on which the job can run. See the
   [Partitions]({{< relref "/guides/submitting_jobs/partitions/_index.md" >}})
   page for a list of possible partitions.
 - **error**
-  Location of the stderr will be written for the job. `[groupname]`
-  and `[username]` should be replaced your group name and username.
-  Your username can be retrieved with the command `id -un` and your
-  group with `id -ng`.
+  Location where the stderr will be written for the job. `[groupname]`
+  and `[username]` should be replaced with your group name and username.
+  Your username can be retrieved with the command `id -un` and your
+  group with `id -ng`.
 - **output**
   Location of the stdout will be written for the job.
 
 More advanced submit commands can be found on the
 [SLURM Docs](https://slurm.schedmd.com/sbatch.html).
-You can also find an example of a MPI submission on [Submitting an MPI Job]({{< relref "submitting_an_mpi_job" >}}).
+You can also find an example of an MPI submission on [Submitting an MPI Job]({{< relref "submitting_an_mpi_job" >}}).
 
 ### Submitting the job
 
-Submitting the SLURM job is done by command `sbatch`. SLURM will read
+Submitting the SLURM job is done with the `sbatch` command. SLURM will read
 the submit file, and schedule the job according to the description in
 the submit file.
 
@@ -114,14 +115,14 @@
 Submitted batch job 24603
 {{< /highlight >}}
 {{% /panel %}}
 
-The job was successfully submitted.
+The job was successfully submitted.
 
 ### Checking Job Status
 
-Job status is found with the command `squeue`. It will provide
+Job status is found with the command `squeue`. It will provide
 information such as:
 
-- The State of the job:
+- The state of the job:
   - **R** - Running
   - **PD** - Pending - Job is awaiting resource allocation.
   - Additional codes are available
@@ -132,7 +133,7 @@ information such as:
   - Nodes running the job
 
 Checking the status of the job is easiest by filtering by your username,
-using the `-u` option to squeue.
+using the `-u` option to `squeue`.
 
 {{< highlight batch >}}
 $ squeue -u <username>
@@ -142,7 +143,7 @@
 
 Additionally, if you want to see the status of a specific partition, for
 example if you are part of a [partition]({{< relref "/guides/submitting_jobs/partitions/_index.md" >}}),
-you can use the `-p` option to `squeue`:
+you can use the `-p` option to `squeue`:
 
 {{< highlight batch >}}
 $ squeue -p esquared
@@ -156,7 +157,7 @@ $ squeue -p esquared
 
 #### Checking Job Start
 
 You may view the start time of your job with the
-command `squeue --start`. The output of the command will show the
+command `squeue --start`. The output of the command will show the
 expected start time of the jobs.
 {{< highlight batch >}}
@@ -178,11 +179,11 @@ $ squeue --start --user lypeng
 
 The output shows the expected start time of the jobs, as well as the
 reason that the jobs are currently idle (in this case, low priority of
 the user due to running numerous jobs already).
-
+
 #### Removing the Job
 
-Removing the job is done with the `scancel` command. The only argument
-to the `scancel` command is the job id. For the job above, the command
+Removing the job is done with the `scancel` command. The only argument
+to the `scancel` command is the job ID. For the job above, the command
 is:
 
 {{< highlight batch >}}
@@ -191,4 +192,4 @@ $ scancel 24605
 
 ### Next Steps
 
-{{% children %}}
+{{% children %}}
diff --git a/content/submitting_jobs/app_specific/_index.md b/content/submitting_jobs/app_specific/_index.md
new file mode 100644
index 00000000..f720ecdf
--- /dev/null
+++ b/content/submitting_jobs/app_specific/_index.md
@@ -0,0 +1,9 @@
++++
+title = "Application Specific Guides"
+weight = "100"
++++
+
+In-depth guides for running applications on HCC resources
+--------------------------------------
+
+{{% children description="true" %}}
diff --git a/content/submitting_jobs/app_specific/submitting_an_openmp_job.md b/content/submitting_jobs/app_specific/submitting_an_openmp_job.md
new file mode 100644
index 00000000..e69de29b
diff --git a/content/Submitting_Jobs/submitting_ansys_jobs.md b/content/submitting_jobs/app_specific/submitting_ansys_jobs.md
similarity index 94%
rename from content/Submitting_Jobs/submitting_ansys_jobs.md
rename to content/submitting_jobs/app_specific/submitting_ansys_jobs.md
index cf5953b1..26d09361 100644
--- a/content/Submitting_Jobs/submitting_ansys_jobs.md
+++ b/content/submitting_jobs/app_specific/submitting_ansys_jobs.md
@@ -37,5 +37,5 @@ Details of SLURM job submission can be found at [SUBMITTING JOBS]({{< relref "su
 ### Running ANSYS interactively
 1. To use graphical user interface, users need to first setup X11 forwarding. [HOW TO SETUP X11 FORWARDING]({{< relref "how_to_setup_x11_forwarding" >}})
-1. Start an interactie job using srun. NOTE: users need to add \--licenses=ansys_research or \--licenses=ansys_teaching to the srun command. [SUBMITTING AN INTERACTIVE JOB]({{< relref "submitting_an_interactive_job" >}})
+1. Start an interactive job using srun. NOTE: users need to add \--licenses=ansys_research or \--licenses=ansys_teaching to the srun command. [SUBMITTING AN INTERACTIVE JOB]({{< relref "creating_an_interactive_job" >}})
 1. After the interactive job starts, execute "module load ansys/19.2", then run the ANSYS command, e.g. fluent, from command line. The GUI will show up if steps 1-2 are configured correctly.
diff --git a/content/Submitting_Jobs/submitting_cuda_or_openacc_jobs.md b/content/submitting_jobs/app_specific/submitting_cuda_or_openacc_jobs.md
similarity index 98%
rename from content/Submitting_Jobs/submitting_cuda_or_openacc_jobs.md
rename to content/submitting_jobs/app_specific/submitting_cuda_or_openacc_jobs.md
index 4d49fda5..1e1b1764 100644
--- a/content/Submitting_Jobs/submitting_cuda_or_openacc_jobs.md
+++ b/content/submitting_jobs/app_specific/submitting_cuda_or_openacc_jobs.md
@@ -77,7 +77,7 @@ You must verify the GPU type and memory combination is valid based on the [avail
 ### Compiling
 
 Compilation of CUDA or OpenACC jobs must be performed on the GPU nodes.
-Therefore, you must run an [interactive job]({{< relref "submitting_an_interactive_job" >}})
+Therefore, you must run an [interactive job]({{< relref "creating_an_interactive_job" >}})
 to compile. An example command to compile in the `gpu` partition could be:
 
 {{< highlight batch >}}
diff --git a/content/Submitting_Jobs/submitting_matlab_jobs.md b/content/submitting_jobs/app_specific/submitting_matlab_jobs.md
similarity index 100%
rename from content/Submitting_Jobs/submitting_matlab_jobs.md
rename to content/submitting_jobs/app_specific/submitting_matlab_jobs.md
diff --git a/content/Submitting_Jobs/submitting_r_jobs.md b/content/submitting_jobs/app_specific/submitting_r_jobs.md
similarity index 100%
rename from content/Submitting_Jobs/submitting_r_jobs.md
rename to content/submitting_jobs/app_specific/submitting_r_jobs.md
diff --git a/content/Submitting_Jobs/submitting_an_interactive_job.md b/content/submitting_jobs/creating_an_interactive_job.md
similarity index 96%
rename from content/Submitting_Jobs/submitting_an_interactive_job.md
rename to content/submitting_jobs/creating_an_interactive_job.md
index ba5eed2c..1100a049 100644
--- a/content/Submitting_Jobs/submitting_an_interactive_job.md
+++ b/content/submitting_jobs/creating_an_interactive_job.md
@@ -1,6 +1,7 @@
 +++
-title = "Submitting an Interactive Job"
+title = "Creating an Interactive Job"
 description = "How to run an interactive job on HCC resources."
+weight=20
 +++
 
 {{% notice info %}}
diff --git a/content/Submitting_Jobs/hcc_acknowledgment_credit.md b/content/submitting_jobs/hcc_acknowledgment_credit.md
similarity index 99%
rename from content/Submitting_Jobs/hcc_acknowledgment_credit.md
rename to content/submitting_jobs/hcc_acknowledgment_credit.md
index d04c6834..0ad8a029 100644
--- a/content/Submitting_Jobs/hcc_acknowledgment_credit.md
+++ b/content/submitting_jobs/hcc_acknowledgment_credit.md
@@ -1,6 +1,7 @@
 +++
 title = "HCC Acknowledgment Credit"
 description = "Details on the Acknowledgment Credit system."
+weight=90
 +++
 
 {{% notice note %}}
diff --git a/content/Submitting_Jobs/job_dependencies.md b/content/submitting_jobs/job_dependencies.md
similarity index 99%
rename from content/Submitting_Jobs/job_dependencies.md
rename to content/submitting_jobs/job_dependencies.md
index 98c96f2d..36aa971e 100644
--- a/content/Submitting_Jobs/job_dependencies.md
+++ b/content/submitting_jobs/job_dependencies.md
@@ -1,6 +1,7 @@
 +++
 title = "Job Dependencies"
 description = "How to use job dependencies with the SLURM scheduler."
+weight=55
 +++
 
 The job dependency feature of SLURM is useful when you need to run
diff --git a/content/Submitting_Jobs/monitoring_jobs.md b/content/submitting_jobs/monitoring_jobs.md
similarity index 99%
rename from content/Submitting_Jobs/monitoring_jobs.md
rename to content/submitting_jobs/monitoring_jobs.md
index 2e5d2e58..b6b458eb 100644
--- a/content/Submitting_Jobs/monitoring_jobs.md
+++ b/content/submitting_jobs/monitoring_jobs.md
@@ -1,6 +1,7 @@
 +++
 title = "Monitoring Jobs"
 description = "How to find out information about running and completed jobs."
+weight=55
 +++
 
 Careful examination of running times, memory usage and output files will
diff --git a/content/Submitting_Jobs/partitions/_index.md b/content/submitting_jobs/partitions/_index.md
similarity index 99%
rename from content/Submitting_Jobs/partitions/_index.md
rename to content/submitting_jobs/partitions/_index.md
index 751ebb35..88466f8e 100644
--- a/content/Submitting_Jobs/partitions/_index.md
+++ b/content/submitting_jobs/partitions/_index.md
@@ -1,8 +1,9 @@
 +++
-title = "Partitions"
+title = "Available Partitions"
 description = "Listing of partitions on Crane and Rhino."
 scripts = ["https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/js/jquery.tablesorter.min.js", "https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/js/widgets/widget-pager.min.js","https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/js/widgets/widget-filter.min.js","/js/sort-table.js"]
 css = ["http://mottie.github.io/tablesorter/css/theme.default.css","https://mottie.github.io/tablesorter/css/theme.dropbox.css", "https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/css/jquery.tablesorter.pager.min.css","https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.31.1/css/filter.formatter.min.css"]
+weight=70
 +++
 
 Partitions are used on Crane and Rhino to distinguish different
diff --git a/content/Submitting_Jobs/partitions/crane_available_partitions.md b/content/submitting_jobs/partitions/crane_available_partitions.md
similarity index 100%
rename from content/Submitting_Jobs/partitions/crane_available_partitions.md
rename to content/submitting_jobs/partitions/crane_available_partitions.md
diff --git a/content/Submitting_Jobs/partitions/rhino_available_partitions.md b/content/submitting_jobs/partitions/rhino_available_partitions.md
similarity index 100%
rename from content/Submitting_Jobs/partitions/rhino_available_partitions.md
rename to content/submitting_jobs/partitions/rhino_available_partitions.md
diff --git a/content/Guides/sandstone/_index.md b/content/submitting_jobs/sandstone.md
similarity index 96%
rename from content/Guides/sandstone/_index.md
rename to content/submitting_jobs/sandstone.md
index 8106deff..079d5693 100644
--- a/content/Guides/sandstone/_index.md
+++ b/content/submitting_jobs/sandstone.md
@@ -1,51 +1,51 @@
-+++
-title = "Sandstone"
-description = "How to use HCC's sandstone environment"
-weight = "45"
-+++
-
-
-### Overview
-
-The HCC Sandstone environment is a GUI interface to the Crane cluster featuring a file browser, text editor, web terminal and SLURM script helper,
-
-To login to the Sandstone environment, go to [crane.unl.edu](https://crane.unl.edu) in your web browser and sign in using your HCC Login Info and DUO authentication.
-
-Upon login, you will land at the File Browser.
-
-
-### File Browser
-The file browser allows you to view, access, and transfer files on Crane. On the left side you will have your available spaces, both your home and work directories. In the upper right of the page, you have buttons to upload files, create a file, and create a directory.
-
-{{< figure src="/images/SandstonefileBrowserOver.png">}}
-
-Clicking on either box under "My Spaces" will change your current directory to either your home or work directory and display your user/group usage and quotas. You can then navigate directories by clicking through them in a similar manner as you would with Windows or MacOS.
-
-{{< figure src="/images/SandstonefileOptions.png">}}
-
-Clicking on a file or directory will bring up some options such as the permissions and actions to do such as editing the file, duplicating or moving it, deleting it, and downloading it.
-
-### Editor
-The editor is a basic text editor that allows you to have multiple files loaded and manipulate the files. A small file explorer is available on the left side to access more files. There are similar actions available for files above the mini file browser.
-
-{{< figure src="/images/Sandstoneeditor.png">}}
-
-Like most text editors, basic functions exist to undo and redo changes, find and replace, and most importantly, to save the file.
-
-{{< figure src="/images/SandstoneedtiorDropDown.png">}}
-
-### Terminal
-
-The terminal gives you access to the linux command line on crane, similar to what you would have if you SSH'd directly into Crane. Once the login and quote screen, you can enter commands and interact as you would with a standard terminal.
-{{< figure src="/images/SandstoneTerminal.png">}}
-
-### Slurm Assist
-
-Slurm assist is a tool to help create and run slurm submit scripts. The first step is to select a base profile from the profile dropdown menu. Options will appear and the directives will automatically appear. The options are editable to better fit to your specific job with more details found in our submitting jobs documentation. After the directives are filled out, you can then add the commands to start your job in the script section. To save the job, select 'save script for later' and save the script in a known location for later.
-{{< figure src="/images/SandstoneSASettings.png">}}
-From here, you can also schedule the script recently create, by selecting "Schedule Job". A confirmation will appear with the Job ID and then an instruction on how to view the status of your job.
-{{< figure src="/images/SandstoneJobConf.png">}}
-{{< figure src="/images/SandstoneSAStatus.png">}}
-You can view the progress of other jobs from slurm assist by going to the status page. Here you will see the State of the job, its ID, name, group name, runtime, and the start and end times.
-{{< figure src="/images/SandstoneSAStatusPage.png">}}
-{{< figure src="/images/SandstoneSAStatuses.png">}}
\ No newline at end of file
++++
+title = "Sandstone"
+description = "How to use HCC's sandstone environment"
+weight = "95"
++++
+
+
+### Overview
+
+The HCC Sandstone environment is a GUI interface to the Crane cluster featuring a file browser, text editor, web terminal, and SLURM script helper.
+
+To log in to the Sandstone environment, go to [crane.unl.edu](https://crane.unl.edu) in your web browser and sign in using your HCC login credentials and Duo authentication.
+
+Upon login, you will land at the File Browser.
+
+
+### File Browser
+The file browser allows you to view, access, and transfer files on Crane. On the left side you will have your available spaces, both your home and work directories. In the upper right of the page, you have buttons to upload files, create a file, and create a directory.
+
+{{< figure src="/images/SandstonefileBrowserOver.png">}}
+
+Clicking on either box under "My Spaces" will change your current directory to either your home or work directory and display your user/group usage and quotas. You can then navigate directories by clicking through them in a similar manner as you would with Windows or macOS.
+
+{{< figure src="/images/SandstonefileOptions.png">}}
+
+Clicking on a file or directory will bring up options, such as viewing its permissions, and actions such as editing, duplicating, moving, deleting, or downloading the file.
+
+### Editor
+The editor is a basic text editor that allows you to open and manipulate multiple files at once. A small file explorer is available on the left side to access more files. Similar file actions are available above the mini file browser.
+
+{{< figure src="/images/Sandstoneeditor.png">}}
+
+Like most text editors, basic functions exist to undo and redo changes, find and replace, and most importantly, to save the file.
+
+{{< figure src="/images/SandstoneedtiorDropDown.png">}}
+
+### Terminal
+
+The terminal gives you access to the Linux command line on Crane, similar to what you would have if you SSH'd directly into Crane. Once past the login and quote screen, you can enter commands and interact as you would with a standard terminal.
+{{< figure src="/images/SandstoneTerminal.png">}}
+
+### Slurm Assist
+
+Slurm Assist is a tool to help create and run SLURM submit scripts. The first step is to select a base profile from the profile dropdown menu. Options will then appear, and the corresponding directives will be generated automatically. The options are editable to better fit your specific job; more details can be found in our submitting jobs documentation. After the directives are filled out, you can add the commands that start your job in the script section. To save the job, select 'save script for later' and store the script in a known location.
+{{< figure src="/images/SandstoneSASettings.png">}}
+From here, you can also schedule the recently created script by selecting "Schedule Job". A confirmation will appear with the job ID, followed by instructions on how to view the status of your job.
+{{< figure src="/images/SandstoneJobConf.png">}}
+{{< figure src="/images/SandstoneSAStatus.png">}}
+You can view the progress of other jobs from Slurm Assist by going to the status page. Here you will see the state of each job, its ID, name, group name, runtime, and start and end times.
+{{< figure src="/images/SandstoneSAStatusPage.png">}}
+{{< figure src="/images/SandstoneSAStatuses.png">}}
diff --git a/content/Submitting_Jobs/submitting_a_job_array.md b/content/submitting_jobs/submitting_a_job_array.md
similarity index 99%
rename from content/Submitting_Jobs/submitting_a_job_array.md
rename to content/submitting_jobs/submitting_a_job_array.md
index 08245c8d..5c6acfee 100644
--- a/content/Submitting_Jobs/submitting_a_job_array.md
+++ b/content/submitting_jobs/submitting_a_job_array.md
@@ -1,6 +1,7 @@
 +++
 title = "Submitting a Job Array"
 description = "How to use job arrays with the SLURM scheduler."
+weight=30
 +++
 
 A job array is a set of jobs that share the same submit file, but will
diff --git a/content/Submitting_Jobs/submitting_an_mpi_job.md b/content/submitting_jobs/submitting_an_mpi_job.md
similarity index 99%
rename from content/Submitting_Jobs/submitting_an_mpi_job.md
rename to content/submitting_jobs/submitting_an_mpi_job.md
index 89315e42..918a5253 100644
--- a/content/Submitting_Jobs/submitting_an_mpi_job.md
+++ b/content/submitting_jobs/submitting_an_mpi_job.md
@@ -1,6 +1,7 @@
 +++
 title = "Submitting an MPI Job"
 description = "How to submit an MPI job on HCC resources."
+weight=40
 +++
 
 This script requests 16 cores on nodes with InfiniBand:
diff --git a/content/Submitting_Jobs/submitting_htcondor_jobs.md b/content/submitting_jobs/submitting_htcondor_jobs.md
similarity index 99%
rename from content/Submitting_Jobs/submitting_htcondor_jobs.md
rename to content/submitting_jobs/submitting_htcondor_jobs.md
index a917caf2..a2a49e09 100644
--- a/content/Submitting_Jobs/submitting_htcondor_jobs.md
+++ b/content/submitting_jobs/submitting_htcondor_jobs.md
@@ -1,6 +1,7 @@
 +++
 title = "Submitting HTCondor Jobs"
 description = "How to submit HTCondor Jobs on HCC resources."
+weight=50
 +++
 
 If you require features of HTCondor, such as DAGMan or Pegasus,
-- 
GitLab