From d424c4107980c6803ba9a4213ce3a6a0f6dc1381 Mon Sep 17 00:00:00 2001
From: Adam Caprez <acaprez2@unl.edu>
Date: Mon, 11 Nov 2019 19:59:50 +0000
Subject: [PATCH] Add new V100 GPU info.

---
 .../submitting_cuda_or_openacc_jobs.md | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/content/Submitting_Jobs/submitting_cuda_or_openacc_jobs.md b/content/Submitting_Jobs/submitting_cuda_or_openacc_jobs.md
index 2806fe88..8fdbbd8c 100644
--- a/content/Submitting_Jobs/submitting_cuda_or_openacc_jobs.md
+++ b/content/Submitting_Jobs/submitting_cuda_or_openacc_jobs.md
@@ -1,5 +1,5 @@
 +++
-title = "Submitting CUDA or OpenACC Jobs"
+title = "Submitting GPU Jobs"
 description = "How to submit GPU (CUDA/OpenACC) jobs on HCC resources."
 +++
 
@@ -9,13 +9,14 @@
 Crane has four types of GPUs available in the **gpu** partition. The
 type of GPU is configured as a SLURM feature, so you can specify a type
 of GPU in your job resource requirements if necessary.
 
-| Description | SLURM Feature | Available Hardware |
-| -------------------- | ------------- | ---------------------------- |
-| Tesla K20, non-IB | gpu_k20 | 3 nodes - 2 GPUs with 4 GB mem per node |
-| Teska K20, with IB | gpu_k20 | 3 nodes - 3 GPUs with 4 GB mem per node |
-| Tesla K40, with IB | gpu_k40 | 5 nodes - 4 K40M GPUs with 11 GB mem per node<br> 1 node - 2 K40C GPUs |
-| Tesla P100, with OPA | gpu_p100 | 2 nodes - 2 GPUs with 12 GB per node |
+| Description | SLURM Feature | Available Hardware |
+| -------------------- | ------------- | ---------------------------- |
+| Tesla K20, non-IB | gpu_k20 | 3 nodes - 2 GPUs with 4 GB mem per node |
+| Tesla K20, with IB | gpu_k20 | 3 nodes - 3 GPUs with 4 GB mem per node |
+| Tesla K40, with IB | gpu_k40 | 5 nodes - 4 K40M GPUs with 11 GB mem per node<br> 1 node - 2 K40C GPUs |
+| Tesla P100, with OPA | gpu_p100 | 2 nodes - 2 GPUs with 12 GB per node |
 | Tesla V100, with 10GbE | gpu_v100 | 1 node - 4 GPUs with 16 GB per node |
+| Tesla V100, with OPA | gpu_v100 | 21 nodes - 2 GPUs with 32 GB per node |
 
 To run your job on the next available GPU regardless of type, add the
-- 
GitLab