Commit 3a016d03 authored by Mohammed Tanash's avatar Mohammed Tanash
Browse files

Merge branch 'AddExamples' into 'master'

Add Examples repo to the website

See merge request !254
parents 35f2fd24 b164ee24
......@@ -5,5 +5,6 @@ weight = "100"
In-depth guides for running applications on HCC resources
--------------------------------------
{{< readfile file="content/submitting_jobs/app_specific/job-examples/README.md" markdown="true" >}}
{{% children description="true" %}}
{{% children %}}
jupyter/.ipynb_checkpoints/*
jupyter/data/airline*
*/*.err
*/*.out
This diff is collapsed.
# HTCondor submit description file: brute-force Rosenbrock minimization on OSG.
# Submits 10 identical copies of the wrapper script; no arguments are passed,
# so each job uses the Python script's randomly generated search bounds.
# The UNIVERSE defines an execution environment. You will almost always use VANILLA.
Universe = vanilla
# These are good base requirements for your jobs on the OSG. It is specific on OS and
# OS version, core count and memory, and wants to use the software modules.
Requirements = (( OSGVO_OS_STRING == "RHEL 6" ) || ( OSGVO_OS_STRING == "RHEL 7" )) && TARGET.Arch == "X86_64" && HAS_MODULES == True
request_cpus = 1
request_memory = 1 GB
# executable is the program your job will run. It's often useful
# to create a shell script to "wrap" your actual work.
executable = ../scalingup-python-wrapper.sh
# files transferred into the job sandbox
transfer_input_files = ../rosen_brock_brute_opt.py
# error and output are the error and output channels from your job
# that HTCondor returns from the remote host.
# NOTE(review): the Log/ directory must already exist at submit time,
# or HTCondor cannot write these files.
output = Log/job.out.$(Cluster).$(Process)
error = Log/job.error.$(Cluster).$(Process)
# The log file is where HTCondor places information about your
# job's status, success, and resource consumption.
log = Log/job.log.$(Cluster).$(Process)
# Send the job to Held state on failure.
on_exit_hold = (ExitBySignal == True) || (ExitCode != 0)
# Periodically retry the jobs every 60 seconds, up to a maximum of 5 retries.
# The RANDOM_INTEGER(60, 600, 120) means random integers are generated between
# 60 and 600 seconds with a step size of 120 seconds. The failed jobs are
# randomly released with a spread of 1-10 minutes. Releasing multiple jobs at
# the same time causes stress for the login node, so the random spread is a
# good approach to periodically release the failed jobs.
PeriodicRelease = ( (CurrentTime - EnteredCurrentStatus) > $RANDOM_INTEGER(60, 600, 120) ) && ((NumJobStarts < 5))
# Queue is the "start button" - it launches any jobs that have been
# specified thus far.
queue 10
#!/bin/bash
# Pull the "Search Result" line out of every job's stdout file and list
# the results, numerically sorted on column 3 with the largest value first.
grep "Search Result" Log/job.out.*.* | sort -k3 -rn
# HTCondor submit description file: brute-force Rosenbrock search over nine
# shrinking boundary boxes, one job per "arguments"/"queue" pair below.
# The UNIVERSE defines an execution environment. You will almost always use VANILLA.
Universe = vanilla
# These are good base requirements for your jobs on the OSG. It is specific on OS and
# OS version, core count and memory, and wants to use the software modules.
Requirements = (( OSGVO_OS_STRING == "RHEL 6" ) || ( OSGVO_OS_STRING == "RHEL 7" )) && TARGET.Arch == "X86_64" && HAS_MODULES == True
request_cpus = 1
request_memory = 1 GB
# executable is the program your job will run. It's often useful
# to create a shell script to "wrap" your actual work.
executable = ../scalingup-python-wrapper.sh
# files transferred into the job sandbox
transfer_input_files = ../rosen_brock_brute_opt.py
# error and output are the error and output channels from your job
# that HTCondor returns from the remote host.
output = Log/job.out.$(Cluster).$(Process)
error = Log/job.error.$(Cluster).$(Process)
# The log file is where HTCondor places information about your
# job's status, success, and resource consumption.
log = Log/job.log.$(Cluster).$(Process)
# Send the job to Held state on failure.
on_exit_hold = (ExitBySignal == True) || (ExitCode != 0)
# Periodically retry the jobs every 60 seconds, up to a maximum of 5 retries.
# The RANDOM_INTEGER(60, 600, 120) means random integers are generated between
# 60 and 600 seconds with a step size of 120 seconds. The failed jobs are
# randomly released with a spread of 1-10 minutes. Releasing multiple jobs at
# the same time causes stress for the login node, so the random spread is a
# good approach to periodically release the failed jobs.
PeriodicRelease = ( (CurrentTime - EnteredCurrentStatus) > $RANDOM_INTEGER(60, 600, 120) ) && ((NumJobStarts < 5))
#Supply arguments
# Each "arguments"/"queue" pair submits one job with its own search boundary
# (x_low x_high y_low y_high). This is the verbose form; the "queue ... from"
# syntax is a more compact alternative.
arguments = -9 9 -9 9
# Queue is the "start button" - it launches any jobs that have been
# specified thus far.
queue
arguments = -8 8 -8 8
queue
arguments = -7 7 -7 7
queue
arguments = -6 6 -6 6
queue
arguments = -5 5 -5 5
queue
arguments = -4 4 -4 4
queue
arguments = -3 3 -3 3
queue
arguments = -2 2 -2 2
queue
arguments = -1 1 -1 1
queue
#!/bin/bash
# Gather each job's "Search Result" line from stdout and show them in
# descending numeric order of the reported minimum (column 3).
grep "Search Result" Log/job.out.*.* | sort -k3 -rn
# HTCondor submit description file: same nine-job Rosenbrock sweep as the
# verbose example, but using a single "queue arguments from (...)" list —
# one job is submitted per line inside the parentheses.
# The UNIVERSE defines an execution environment. You will almost always use VANILLA.
Universe = vanilla
# These are good base requirements for your jobs on the OSG. It is specific on OS and
# OS version, core count and memory, and wants to use the software modules.
Requirements = (( OSGVO_OS_STRING == "RHEL 6" ) || ( OSGVO_OS_STRING == "RHEL 7" )) && TARGET.Arch == "X86_64" && HAS_MODULES == True
request_cpus = 1
request_memory = 1 GB
# executable is the program your job will run. It's often useful
# to create a shell script to "wrap" your actual work.
executable = ../scalingup-python-wrapper.sh
# files transferred into the job sandbox
transfer_input_files = ../rosen_brock_brute_opt.py
# error and output are the error and output channels from your job
# that HTCondor returns from the remote host.
output = Log/job.out.$(Cluster).$(Process)
error = Log/job.error.$(Cluster).$(Process)
# The log file is where HTCondor places information about your
# job's status, success, and resource consumption.
log = Log/job.log.$(Cluster).$(Process)
# Send the job to Held state on failure.
on_exit_hold = (ExitBySignal == True) || (ExitCode != 0)
# Periodically retry the jobs every 60 seconds, up to a maximum of 5 retries.
# The RANDOM_INTEGER(60, 600, 120) means random integers are generated between
# 60 and 600 seconds with a step size of 120 seconds. The failed jobs are
# randomly released with a spread of 1-10 minutes. Releasing multiple jobs at
# the same time causes stress for the login node, so the random spread is a
# good approach to periodically release the failed jobs.
PeriodicRelease = ( (CurrentTime - EnteredCurrentStatus) > $RANDOM_INTEGER(60, 600, 120) ) && ((NumJobStarts < 5))
# Queue command
# Each line inside the parentheses becomes the argument string
# (x_low x_high y_low y_high) of one queued job.
queue arguments from (
-9 9 -9 9
-8 8 -8 8
-7 7 -7 7
-6 6 -6 6
-5 5 -5 5
-4 4 -4 4
-3 3 -3 3
-2 2 -2 2
-1 1 -1 1
)
#!/bin/bash
# Extract the "Search Result" line from each job's stdout file; print all
# of them sorted numerically on field 3, largest first.
grep "Search Result" Log/job.out.*.* | sort -k3 -rn
# HTCondor submit description file: the same nine-job Rosenbrock sweep, now
# using named queue variables (x_low, x_high, y_low, y_high) that are
# substituted into the "arguments" line — one job per data row.
# The UNIVERSE defines an execution environment. You will almost always use VANILLA.
Universe = vanilla
# These are good base requirements for your jobs on the OSG. It is specific on OS and
# OS version, core count and memory, and wants to use the software modules.
Requirements = (( OSGVO_OS_STRING == "RHEL 6" ) || ( OSGVO_OS_STRING == "RHEL 7" )) && TARGET.Arch == "X86_64" && HAS_MODULES == True
request_cpus = 1
request_memory = 1 GB
# executable is the program your job will run. It's often useful
# to create a shell script to "wrap" your actual work.
executable = ../scalingup-python-wrapper.sh
# files transferred into the job sandbox
transfer_input_files = ../rosen_brock_brute_opt.py
# error and output are the error and output channels from your job
# that HTCondor returns from the remote host.
output = Log/job.out.$(Cluster).$(Process)
error = Log/job.error.$(Cluster).$(Process)
# The log file is where HTCondor places information about your
# job's status, success, and resource consumption.
log = Log/job.log.$(Cluster).$(Process)
# Send the job to Held state on failure.
on_exit_hold = (ExitBySignal == True) || (ExitCode != 0)
# Periodically retry the jobs every 60 seconds, up to a maximum of 5 retries.
# The RANDOM_INTEGER(60, 600, 120) means random integers are generated between
# 60 and 600 seconds with a step size of 120 seconds. The failed jobs are
# randomly released with a spread of 1-10 minutes. Releasing multiple jobs at
# the same time causes stress for the login node, so the random spread is a
# good approach to periodically release the failed jobs.
PeriodicRelease = ( (CurrentTime - EnteredCurrentStatus) > $RANDOM_INTEGER(60, 600, 120) ) && ((NumJobStarts < 5))
# $(x_low) etc. are replaced per job from the queue list below.
arguments = $(x_low) $(x_high) $(y_low) $(y_high)
# Queue command
# Each comma-separated row assigns the four named variables for one job.
queue x_low x_high y_low y_high from (
-9, 9, -9, 9
-8, 8, -8, 8
-7, 7, -7, 7
-6, 6, -6, 6
-5, 5, -5, 5
-4, 4, -4, 4
-3, 3, -3, 3
-2, 2, -2, 2
-1, 1, -1, 1
)
#!/bin/bash
# Report every job's "Search Result" stdout line, ordered by the numeric
# value in column 3 from largest to smallest.
grep "Search Result" Log/job.out.*.* | sort -k3 -rn
# Computes the minimum of the Rosenbrock function (also known as the banana
# function), a popular function to test the robustness of an optimization method.
# Before running the code, do: module load python/3.4 and module load all-pkgs
# Usage: program.py [low_x1 high_x1 low_x2 high_x2]
# If the optional arguments are skipped, random bounds are assigned.
import sys
import numpy
from scipy import optimize
from random import uniform


def rosenbrock(coordinates):
    """The Rosenbrock function; its global minimum is f(1, 1) = 0.

    Args:
        coordinates: sequence of two numbers (x, y).

    Returns:
        The value (1 - x)**2 + 100*(y - x**2)**2.
    """
    x = coordinates[0]
    y = coordinates[1]
    f = (1 - x)**2 + 100.0*(y - x**2)**2
    return f


if __name__ == "__main__":
    # Assign default bounds from uniform random numbers.
    x_low = uniform(-10, 0)
    x_high = uniform(0, 10)
    y_low = uniform(-10, 0)
    y_high = uniform(0, 10)
    bound_array = [x_low, x_high, y_low, y_high]
    # Replace the random bounds with any supplied command-line values;
    # at most the first four arguments are used, extras are ignored.
    for i in range(1, min(len(sys.argv), 5)):
        bound_array[i - 1] = float(sys.argv[i])
    # optimize.brute expects the search ranges as a tuple of (low, high) pairs.
    brute_range = ((bound_array[0], bound_array[1]), (bound_array[2], bound_array[3]))
    print('Search Boundary x1= {0:3.3f} x2= {1:3.3f} x3= {2:3.3f} x4= {3:3.3f}'.format(*bound_array))
    # Brute-force optimization: the function is evaluated on a grid of points
    # bounded by brute_range. finish=None means no local search; use
    # finish=optimize.fmin to refine the grid minimum with a local search.
    result_from_brute = optimize.brute(rosenbrock, brute_range, full_output=True, finish=None)
    function_min = result_from_brute[1]
    coordinate_of_min = result_from_brute[0]
    print ('Search Result= ', function_min, coordinate_of_min)
#!/bin/bash
# Wrapper executed on the remote worker node: load the Python stack from the
# software modules, then run the optimizer, forwarding the (up to four)
# search-bound arguments supplied by the HTCondor submit file.
module load python/3.4
module load all-pkgs
python ./rosen_brock_brute_opt.py $1 $2 $3 $4
# HTCondor submit description file: single-job example demonstrating explicit
# input and output file transfer on OSG.
# The UNIVERSE defines an execution environment. You will almost always use VANILLA.
Universe = vanilla
# These are good base requirements for your jobs on OSG. It is specific on OS and
# OS version, core count and memory, and wants to use the software modules.
Requirements = (( OSGVO_OS_STRING == "RHEL 6" ) || ( OSGVO_OS_STRING == "RHEL 7" )) && Arch == "X86_64" && HAS_MODULES == True
request_cpus = 1
request_memory = 1 GB
# EXECUTABLE is the program your job will run. It's often useful
# to create a shell script to "wrap" your actual work.
Executable = short_with_input_output_transfer.sh
# ERROR and OUTPUT are the error and output channels from your job
# that HTCondor returns from the remote host. $(Cluster) is the
# ID HTCondor assigns to the job and $(Process) is the ID HTCondor
# assigns within a set of jobs.
Error = job.$(Cluster).$(Process).error
Output = job.$(Cluster).$(Process).output
# The LOG file is where HTCondor places information about your
# job's status, success, and resource consumption.
Log = job.log
# Send the job to Held state on failure.
on_exit_hold = (ExitBySignal == True) || (ExitCode != 0)
# Periodically retry the jobs every 60 seconds, up to a maximum of 5 retries.
periodic_release = (NumJobStarts < 5) && ((CurrentTime - EnteredCurrentStatus) > 60)
# TRANSFER_INPUT_FILES defines which files should be transferred to the job.
# Please note that this should only be used for relatively small files
transfer_input_files = input.txt
# TRANSFER_OUTPUT_FILES defines which files should be transferred from the job back to
# the submit host.
# Please note that this should only be used for relatively small files
transfer_output_files = output.txt
# ARGUMENTS is a way to pass command line input to the EXECUTABLE
arguments = input.txt
# QUEUE is the "start button" - it launches any jobs that have been
# specified thus far.
queue 1
# HTCondor submit description file: minimal single-job example ("short.sh")
# with no input/output file transfer.
# The UNIVERSE defines an execution environment. You will almost always use VANILLA.
Universe = vanilla
# These are good base requirements for your jobs on OSG. It is specific on OS and
# OS version, core count and memory, and wants to use the software modules.
Requirements = (( OSGVO_OS_STRING == "RHEL 6" ) || ( OSGVO_OS_STRING == "RHEL 7" )) && Arch == "X86_64" && HAS_MODULES == True
request_cpus = 1
request_memory = 1 GB
request_disk = 10 GB
# EXECUTABLE is the program your job will run. It's often useful
# to create a shell script to "wrap" your actual work.
Executable = short.sh
# Uncomment and fill in to pass command-line arguments to the executable.
#Arguments =
# ERROR and OUTPUT are the error and output channels from your job
# that HTCondor returns from the remote host.
Error = job.$(Cluster).$(Process).error
Output = job.$(Cluster).$(Process).output
# The LOG file is where HTCondor places information about your
# job's status, success, and resource consumption.
Log = job.log
# Send the job to Held state on failure.
on_exit_hold = (ExitBySignal == True) || (ExitCode != 0)
# Periodically retry the jobs every 1 hour, up to a maximum of 5 retries.
periodic_release = (NumJobStarts < 5) && ((CurrentTime - EnteredCurrentStatus) > 60*60)
# QUEUE is the "start button" - it launches any jobs that have been
# specified thus far.
Queue 1
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment