Commit e3049f2b authored by Caughlin Bohn

Replaced dropdowns with links.

parent 5cb7a567
1 merge request: !252 Running SAS on HCC
@@ -21,72 +21,9 @@ $ mkdir serial_dir
In the subdirectory `serial_dir`, save all the relevant Fortran/C codes. Here we include two demo
programs, `demo_f_serial.f90` and `demo_c_serial.c`, that compute the sum from 1 to 20.
{{%expand "demo_f_serial.f90" %}}
{{< highlight fortran >}}
Program demo_f_serial
    implicit none
    integer, parameter :: N = 20
    real*8 w
    integer i
    common/sol/ x
    real*8 x
    real*8, dimension(N) :: y

    do i = 1,N
        w = i*1d0
        call proc(w)
        y(i) = x
        write(6,*) 'i,x = ', i, y(i)
    enddo
    write(6,*) 'sum(y) =', sum(y)

    Stop
End Program

Subroutine proc(w)
    real*8, intent(in) :: w
    common/sol/ x
    real*8 x
    x = w
    Return
End Subroutine
{{< /highlight >}}
{{% /expand %}}
{{%expand "demo_c_serial.c" %}}
{{< highlight c >}}
//demo_c_serial
#include <stdio.h>
double proc(double w){
    double x;
    x = w;
    return x;
}

int main(int argc, char* argv[]){
    int N=20;
    double w;
    int i;
    double x;
    double y[N];
    double sum;

    for (i = 1; i <= N; i++){
        w = i*1e0;
        x = proc(w);
        y[i-1] = x;
        printf("i,x= %d %lf\n", i, y[i-1]);
    }

    sum = 0e0;
    for (i = 1; i <= N; i++){
        sum = sum + y[i-1];
    }
    printf("sum(y)= %lf\n", sum);

    return 0;
}
{{< /highlight >}}
{{% /expand %}}
[demo_c_serial.c](https://raw.githubusercontent.com/unlhcc/job-examples/master/C/demo_c_serial.c)
[demo_f_serial.f90](https://raw.githubusercontent.com/unlhcc/job-examples/master/fortran/demo_f_serial.f90)
---
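The submit scripts in the next section run prebuilt executables (`demo_f_serial.x`, `demo_c_serial.x`); the compile step itself sits in the collapsed portion of this page. A minimal sketch of that step, assuming the GNU compilers from the `compiler/gcc/4.9` module already referenced in the submit scripts:
{{< highlight bash >}}
# Hypothetical compile commands -- the exact instructions are in the collapsed section above.
module load compiler/gcc/4.9
gfortran demo_f_serial.f90 -o demo_f_serial.x
gcc demo_c_serial.c -o demo_c_serial.x
{{< /highlight >}}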
@@ -121,33 +58,10 @@ Create a submit script to request one core (the default) and a 1-minute run time
on the supercomputer. The executable is invoked on the last line.
{{% panel header="`submit_f.serial`"%}}
{{< highlight bash >}}
#!/bin/sh
#SBATCH --mem-per-cpu=1024
#SBATCH --time=00:01:00
#SBATCH --job-name=Fortran
#SBATCH --error=Fortran.%J.err
#SBATCH --output=Fortran.%J.out
module load compiler/gcc/4.9
./demo_f_serial.x
{{< /highlight >}}
{{% /panel %}}
[submit_f.serial](https://raw.githubusercontent.com/unlhcc/job-examples/master/fortran/submit_f.serial)
[submit_c.serial](https://raw.githubusercontent.com/unlhcc/job-examples/master/C/submit_c.serial)
{{% panel header="`submit_c.serial`"%}}
{{< highlight bash >}}
#!/bin/sh
#SBATCH --mem-per-cpu=1024
#SBATCH --time=00:01:00
#SBATCH --job-name=C
#SBATCH --error=C.%J.err
#SBATCH --output=C.%J.out
module load compiler/gcc/4.9
./demo_c_serial.x
{{< /highlight >}}
{{% /panel %}}
#### Submit the Job
@@ -164,56 +78,4 @@ Replace `<username>` with your HCC username.
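The `sbatch` commands themselves are in the collapsed portion of this page; a sketch of the likely submission step, run from `serial_dir`:
{{< highlight bash >}}
# Assumed submission commands for the two serial jobs
sbatch submit_f.serial
sbatch submit_c.serial
{{< /highlight >}}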
#### Sample Output
The sum from 1 to 20 is computed and printed to the `.out` file (see
below).
{{%expand "Fortran.out" %}}
{{< highlight batchfile>}}
i,x = 1 1.0000000000000000
i,x = 2 2.0000000000000000
i,x = 3 3.0000000000000000
i,x = 4 4.0000000000000000
i,x = 5 5.0000000000000000
i,x = 6 6.0000000000000000
i,x = 7 7.0000000000000000
i,x = 8 8.0000000000000000
i,x = 9 9.0000000000000000
i,x = 10 10.000000000000000
i,x = 11 11.000000000000000
i,x = 12 12.000000000000000
i,x = 13 13.000000000000000
i,x = 14 14.000000000000000
i,x = 15 15.000000000000000
i,x = 16 16.000000000000000
i,x = 17 17.000000000000000
i,x = 18 18.000000000000000
i,x = 19 19.000000000000000
i,x = 20 20.000000000000000
sum(y) = 210.00000000000000
{{< /highlight >}}
{{% /expand %}}
{{%expand "C.out" %}}
{{< highlight batchfile>}}
i,x= 1 1.000000
i,x= 2 2.000000
i,x= 3 3.000000
i,x= 4 4.000000
i,x= 5 5.000000
i,x= 6 6.000000
i,x= 7 7.000000
i,x= 8 8.000000
i,x= 9 9.000000
i,x= 10 10.000000
i,x= 11 11.000000
i,x= 12 12.000000
i,x= 13 13.000000
i,x= 14 14.000000
i,x= 15 15.000000
i,x= 16 16.000000
i,x= 17 17.000000
i,x= 18 18.000000
i,x= 19 19.000000
i,x= 20 20.000000
sum(y)= 210.000000
{{< /highlight >}}
{{% /expand %}}
The sum from 1 to 20 is computed and printed to the `.out` files.
@@ -30,160 +30,9 @@ outputs from all worker cores and perform an overall summation. For easy
comparison with the serial code ([Fortran/C on HCC]({{< relref "fortran_c_on_hcc">}})), the
added lines in the parallel code (MPI) are marked with "!=" or "//=".
{{%expand "demo_f_mpi.f90" %}}
{{< highlight fortran >}}
Program demo_f_mpi
!====== MPI =====
    use mpi
!================
    implicit none
    integer, parameter :: N = 20
    real*8 w
    integer i
    common/sol/ x
    real*8 x
    real*8, dimension(N) :: y

!============================== MPI =================================
    integer ind
    real*8, dimension(:), allocatable :: y_local
    integer numnodes,myid,rc,ierr,start_local,end_local,N_local
    real*8 allsum
!====================================================================

!============================== MPI =================================
    call mpi_init( ierr )
    call mpi_comm_rank ( mpi_comm_world, myid, ierr )
    call mpi_comm_size ( mpi_comm_world, numnodes, ierr )
!
    N_local = N/numnodes
    allocate ( y_local(N_local) )
    start_local = N_local*myid + 1
    end_local   = N_local*myid + N_local
!====================================================================

    do i = start_local, end_local
        w = i*1d0
        call proc(w)
        ind = i - N_local*myid
        y_local(ind) = x
!       y(i) = x
!       write(6,*) 'i, y(i)', i, y(i)
    enddo
!   write(6,*) 'sum(y) =', sum(y)

!============================================== MPI =====================================================
    call mpi_reduce( sum(y_local), allsum, 1, mpi_real8, mpi_sum, 0, mpi_comm_world, ierr )
    call mpi_gather ( y_local, N_local, mpi_real8, y, N_local, mpi_real8, 0, mpi_comm_world, ierr )

    if (myid == 0) then
        write(6,*) '-----------------------------------------'
        write(6,*) '*Final output from... myid=', myid
        write(6,*) 'numnodes =', numnodes
        write(6,*) 'mpi_sum =', allsum
        write(6,*) 'y=...'
        do i = 1, N
            write(6,*) y(i)
        enddo
        write(6,*) 'sum(y)=', sum(y)
    endif

    deallocate( y_local )
    call mpi_finalize(rc)
!========================================================================================================

    Stop
End Program

Subroutine proc(w)
    real*8, intent(in) :: w
    common/sol/ x
    real*8 x
    x = w
    Return
End Subroutine
{{< /highlight >}}
{{% /expand %}}
[demo_f_mpi.f90](https://raw.githubusercontent.com/unlhcc/job-examples/master/fortran/demo_f_mpi.f90)
{{%expand "demo_c_mpi.c" %}}
{{< highlight c >}}
//demo_c_mpi
#include <stdio.h>
//======= MPI ========
#include "mpi.h"
#include <stdlib.h>
//====================
double proc(double w){
    double x;
    x = w;
    return x;
}

int main(int argc, char* argv[]){
    int N=20;
    double w;
    int i;
    double x;
    double y[N];
    double sum;

//=============================== MPI ============================
    int ind;
    double *y_local;
    int numnodes,myid,rc,ierr,start_local,end_local,N_local;
    double allsum;
//================================================================

//=============================== MPI ============================
    MPI_Init(&argc, &argv);
    MPI_Comm_rank( MPI_COMM_WORLD, &myid );
    MPI_Comm_size( MPI_COMM_WORLD, &numnodes );

    N_local = N/numnodes;
    y_local = (double *) malloc(N_local*sizeof(double));
    start_local = N_local*myid + 1;
    end_local   = N_local*myid + N_local;
//================================================================

    for (i = start_local; i <= end_local; i++){
        w = i*1e0;
        x = proc(w);
        ind = i - N_local*myid;
        y_local[ind-1] = x;
//      y[i-1] = x;
//      printf("i,x= %d %lf\n", i, y[i-1]);
    }

    sum = 0e0;
    for (i = 1; i <= N_local; i++){
        sum = sum + y_local[i-1];
    }
//  printf("sum(y)= %lf\n", sum);

//====================================== MPI ===========================================
    MPI_Reduce( &sum, &allsum, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD );
    MPI_Gather( &y_local[0], N_local, MPI_DOUBLE, &y[0], N_local, MPI_DOUBLE, 0, MPI_COMM_WORLD );

    if (myid == 0){
        printf("-----------------------------------\n");
        printf("*Final output from... myid= %d\n", myid);
        printf("numnodes = %d\n", numnodes);
        printf("mpi_sum = %lf\n", allsum);
        printf("y=...\n");
        for (i = 1; i <= N; i++){
            printf("%lf\n", y[i-1]);
        }
        sum = 0e0;
        for (i = 1; i <= N; i++){
            sum = sum + y[i-1];
        }
        printf("sum(y) = %lf\n", sum);
    }

    free( y_local );
    MPI_Finalize();
//======================================================================================
    return 0;
}
{{< /highlight >}}
{{% /expand %}}
[demo_c_mpi.c](https://raw.githubusercontent.com/unlhcc/job-examples/master/C/demo_c_mpi.c)
---
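As in the serial case, the compile step is collapsed here. A minimal sketch, assuming the MPI compiler wrappers `mpif90` and `mpicc` from an MPI module on the cluster (the module names below are placeholders, not taken from this page):
{{< highlight bash >}}
# Hypothetical compile commands using MPI wrappers; "openmpi" is a placeholder module name.
module load compiler/gcc/4.9 openmpi
mpif90 demo_f_mpi.f90 -o demo_f_mpi.x
mpicc demo_c_mpi.c -o demo_c_mpi.x
{{< /highlight >}}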
@@ -210,33 +59,10 @@ Create a submit script to request 5 cores (with `--ntasks`). The parallel
execution command `mpirun` precedes the executable name on the last line.
{{% panel header="`submit_f.mpi`"%}}
{{< highlight bash >}}
#!/bin/sh
#SBATCH --ntasks=5
#SBATCH --mem-per-cpu=1024
#SBATCH --time=00:01:00
#SBATCH --job-name=Fortran
#SBATCH --error=Fortran.%J.err
#SBATCH --output=Fortran.%J.out
mpirun ./demo_f_mpi.x
{{< /highlight >}}
{{% /panel %}}
[submit_f.mpi](https://raw.githubusercontent.com/unlhcc/job-examples/master/fortran/submit_f.mpi)
{{% panel header="`submit_c.mpi`"%}}
{{< highlight bash >}}
#!/bin/sh
#SBATCH --ntasks=5
#SBATCH --mem-per-cpu=1024
#SBATCH --time=00:01:00
#SBATCH --job-name=C
#SBATCH --error=C.%J.err
#SBATCH --output=C.%J.out
mpirun ./demo_c_mpi.x
{{< /highlight >}}
{{% /panel %}}
[submit_c.mpi](https://raw.githubusercontent.com/unlhcc/job-examples/master/C/submit_c.mpi)
#### Submit the Job
@@ -254,69 +80,6 @@ Replace `<username>` with your HCC username.
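The exact commands are collapsed here as well; a sketch of submitting the MPI jobs and watching them in the queue (replace `<username>` with your HCC username, per the context line above):
{{< highlight bash >}}
# Assumed submission commands; squeue shows the state of the 5-task jobs
sbatch submit_f.mpi
sbatch submit_c.mpi
squeue -u <username>
{{< /highlight >}}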
Sample Output
-------------
The sum from 1 to 20 is computed and printed to the `.out` files. The outputs from the 5 cores are collected and processed by the
master core (i.e. `myid=0`).
{{%expand "Fortran.out" %}}
{{< highlight batchfile>}}
-----------------------------------------
*Final output from... myid= 0
numnodes = 5
mpi_sum = 210.00000000000000
y=...
1.0000000000000000
2.0000000000000000
3.0000000000000000
4.0000000000000000
5.0000000000000000
6.0000000000000000
7.0000000000000000
8.0000000000000000
9.0000000000000000
10.000000000000000
11.000000000000000
12.000000000000000
13.000000000000000
14.000000000000000
15.000000000000000
16.000000000000000
17.000000000000000
18.000000000000000
19.000000000000000
20.000000000000000
sum(y)= 210.00000000000000
{{< /highlight >}}
{{% /expand %}}
{{%expand "C.out" %}}
{{< highlight batchfile>}}
-----------------------------------
*Final output from... myid= 0
numnodes = 5
mpi_sum = 210.000000
y=...
1.000000
2.000000
3.000000
4.000000
5.000000
6.000000
7.000000
8.000000
9.000000
10.000000
11.000000
12.000000
13.000000
14.000000
15.000000
16.000000
17.000000
18.000000
19.000000
20.000000
sum(y) = 210.000000
{{< /highlight >}}
{{% /expand %}}
@@ -20,39 +20,7 @@ called `sas_demo` under your `$WORK` directory.
In the subdirectory `sas_demo`, save the SAS code. Here we include a single demo
program, `t-test.sas`, which performs a t-test analysis on a small data set.
{{%expand "t-test.sas" %}}
{{< highlight sas >}}
* load the dataset;
data pulse;
input pre post;
datalines;
62 61
63 62
58 59
64 61
64 63
61 58
68 61
66 64
65 62
67 68
69 65
61 67
64 65
61 63
63 62
;
* it is always a good idea to print out the dataset;
proc print;
run;
* perform the analysis using PROC TTEST;
proc ttest;
paired pre*post; * tells sas to compute the test for the paired differences;
run;
{{< /highlight >}}
{{% /expand %}}
[t-test.sas](https://raw.githubusercontent.com/unlhcc/job-examples/master/sas/t-test.sas)
@@ -64,22 +32,7 @@ Create a submit script to request one core (the default) and a 10-minute run time
on the supercomputer. The SAS program is invoked on the last line.
{{% panel header="`sas.submit`"%}}
{{< highlight bash >}}
#!/bin/bash
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --mem=1gb
#SBATCH --time=00:10:00
#SBATCH --job-name=sas_example
#SBATCH --error=sas_example.%J.err
#SBATCH --output=sas_example.%J.out
module load sas/9.4
sas t-test.sas
{{< /highlight >}}
{{% /panel %}}
[sas.submit](https://raw.githubusercontent.com/unlhcc/job-examples/master/sas/sas.submit)
#### Submit the Job
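The remainder of the SAS page is collapsed in this diff. A sketch of the likely submission step; the `.log` and `.lst` files named below are the usual SAS batch outputs and are an assumption, not something shown on this page:
{{< highlight bash >}}
# Assumed submission command; SAS in batch mode typically writes t-test.log and t-test.lst
sbatch sas.submit
ls t-test.log t-test.lst
{{< /highlight >}}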
@@ -31,8 +31,7 @@ depends on Jobs B and C completing.
{{< figure src="/images/4980738.png" width="400" >}}
The SLURM submit files for each step are below.
{{%expand "JobA.submit" %}}
{{% panel theme="info" header="JobA.submit" %}}
{{< highlight batch >}}
#!/bin/sh
#SBATCH --job-name=JobA
@@ -44,10 +43,10 @@ echo "I'm job A"
echo "Sample job A output" > jobA.out
sleep 120
{{< /highlight >}}
{{% /expand %}}
{{% /panel %}}
{{%expand "JobB.submit" %}}
{{% panel theme="info" header="JobB.submit" %}}
{{< highlight batch >}}
#!/bin/sh
#SBATCH --job-name=JobB
@@ -62,9 +61,9 @@ echo "" >> jobB.out
echo "Sample job B output" >> jobB.out
sleep 120
{{< /highlight >}}
{{% /expand %}}
{{% /panel %}}
{{%expand "JobC.submit" %}}
{{% panel theme="info" header="JobC.submit" %}}
{{< highlight batch >}}
#!/bin/sh
#SBATCH --job-name=JobC
@@ -79,9 +78,9 @@ echo "" >> jobC.out
echo "Sample job C output" >> jobC.out
sleep 120
{{< /highlight >}}
{{% /expand %}}
{{% /panel %}}
{{%expand "JobC.submit" %}}
{{% panel theme="info" header="JobD.submit" %}}
{{< highlight batch >}}
#!/bin/sh
#SBATCH --job-name=JobD
@@ -98,7 +97,7 @@ echo "" >> jobD.out
echo "Sample job D output" >> jobD.out
sleep 120
{{< /highlight >}}
{{% /expand %}}
{{% /panel %}}
To start the workflow, submit Job A first:
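The submission commands are collapsed below; a sketch of one way to chain the four jobs with SLURM dependency flags, using `--parsable` to capture the job IDs (an illustration, not necessarily the exact commands on the original page):
{{< highlight bash >}}
# Jobs B and C start only after Job A finishes successfully;
# Job D starts only after both B and C finish successfully.
JOBA=$(sbatch --parsable JobA.submit)
JOBB=$(sbatch --parsable --dependency=afterok:$JOBA JobB.submit)
JOBC=$(sbatch --parsable --dependency=afterok:$JOBA JobC.submit)
sbatch --dependency=afterok:$JOBB:$JOBC JobD.submit
{{< /highlight >}}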