Commit 3101d39c authored by aknecht2's avatar aknecht2
Browse files

Documentation update for imgproc functions, and workflow examples.

parent 79cad3a1
......@@ -425,7 +425,7 @@ are defined for each image type -- as a result the template is quite long. Here
{
"name": "pot_filter_1",
"executable": "ih-color-filter",
"inputs": ["base", "/work/walia/common/ih/workflows/0184/input/fluosv_pot1.json"],
"inputs": ["base", "/work/walia/common/workflows/0184/input/fluosv_pot1.json"],
"outputs": ["pot1"],
"arguments": {
"--logic": "(((((((r - g) < 30) and (((r + g) + b) < 110)) or ((((r + g) + b) > 110) and ((r - g) < 50))) or (((r - g) < 25) and ((g - r) < 25))) or (g > 60)) not 1)"
......@@ -434,7 +434,7 @@ are defined for each image type -- as a result the template is quite long. Here
{
"name": "pot_filter_2",
"executable": "ih-color-filter",
"inputs": ["pot1", "/work/walia/common/ih/workflows/0184/input/fluosv_pot2.json"],
"inputs": ["pot1", "/work/walia/common/workflows/0184/input/fluosv_pot2.json"],
"outputs": ["pot2"],
"arguments": {
"--logic": "(((r + g) + b) > 120)"
......@@ -454,7 +454,7 @@ are defined for each image type -- as a result the template is quite long. Here
{
"name": "crop",
"executable": "ih-crop",
"inputs": ["filter", "/work/walia/common/ih/workflows/0184/input/fluosv_edge.json"],
"inputs": ["filter", "/work/walia/common/workflows/0184/input/fluosv_edge.json"],
"outputs": ["edged"],
"arguments": {},
"depends": ["main_filter"]
......@@ -465,7 +465,6 @@ are defined for each image type -- as a result the template is quite long. Here
"inputs": ["edged", "edged"],
"outputs": ["final"],
"arguments": {
"--resize": "",
"--basemin": 75
},
"depends": ["crop"]
......@@ -517,7 +516,7 @@ are defined for each image type -- as a result the template is quite long. Here
{
"name": "crop",
"executable": "ih-crop",
"inputs": ["morphed", "/work/walia/common/ih/workflows/0184/input/rgbtv_edge.json"],
"inputs": ["morphed", "/work/walia/common/workflows/0184/input/rgbtv_edge.json"],
"outputs": ["edged"],
"arguments": {},
"depends": ["closing"]
......@@ -536,8 +535,7 @@ are defined for each image type -- as a result the template is quite long. Here
"inputs": ["recolor", "recolor"],
"outputs": ["final"],
"arguments": {
"--basemin": 200,
"--resize": ""
"--basemin": 200
},
"depends": ["reconstitute"]
}
......@@ -576,8 +574,8 @@ are defined for each image type -- as a result the template is quite long. Here
"--basemin": 100,
"--padminy": 35,
"--padmaxy": 2000,
"--padminx": 25,
"--padmaxx": 25,
"--padminx": 50,
"--padmaxx": 50,
"--returnBound": ""
},
"depends": ["pot-detect"]
......@@ -609,8 +607,8 @@ are defined for each image type -- as a result the template is quite long. Here
"inputs": ["blurred"],
"outputs": ["thresh"],
"arguments": {
"--value": 200,
"--thresholdType": "binary",
"--value": 255,
"--thresholdType": "inverse",
"--adaptiveType": "mean",
"--blockSize": 15,
"--C": 3
......@@ -653,14 +651,14 @@ are defined for each image type -- as a result the template is quite long. Here
"inputs": ["pot_filtered", "box_roi"],
"outputs": ["box_filtered"],
"arguments": {
"--logic": "(((((r > g) and (r > b)) and (((b max g) - (b min g)) < (((r + g) + b) / 20))) or ((((b max g) max r) - ((b min g) min r)) < 10)) not 1)"
"--logic": "(((g - b) > 30) or ((r - b) > 30))"
},
"depends": ["pot-filter", "box-crop"]
},
{
"name": "gfilter1",
"executable": "ih-color-filter",
"inputs": ["box_filtered", "/work/walia/common/ih/workflows/0184/input/rgbsv_gray1.json"],
"inputs": ["box_filtered", "/work/walia/common/workflows/0184/input/rgbsv_gray1.json"],
"outputs": ["gray_filtered1"],
"arguments": {
"--logic": "((((b max g) max r) - ((b min g) min r)) > 50)"
......@@ -670,7 +668,7 @@ are defined for each image type -- as a result the template is quite long. Here
{
"name": "gfilter2",
"executable": "ih-color-filter",
"inputs": ["gray_filtered1", "/work/walia/common/ih/workflows/0184/input/rgbsv_gray2.json"],
"inputs": ["gray_filtered1", "/work/walia/common/workflows/0184/input/rgbsv_gray2.json"],
"outputs": ["gray_filtered2"],
"arguments": {
"--logic": "((((b max g) max r) - ((b min g) min r)) > 100)"
......@@ -680,7 +678,7 @@ are defined for each image type -- as a result the template is quite long. Here
{
"name": "crop",
"executable": "ih-crop",
"inputs": ["gray_filtered2", "/work/walia/common/ih/workflows/0184/input/rgbsv_edge.json"],
"inputs": ["gray_filtered2", "/work/walia/common/workflows/0184/input/rgbsv_edge.json"],
"outputs": ["edged"],
"arguments": {},
"depends": ["gfilter2"]
......@@ -699,8 +697,7 @@ are defined for each image type -- as a result the template is quite long. Here
"inputs": ["recolor2", "recolor2"],
"outputs": ["final"],
"arguments": {
"--basemin": 50,
"--resize": ""
"--basemin": 50
},
"depends": ["reconstitute2"]
}
......@@ -712,13 +709,14 @@ are defined for each image type -- as a result the template is quite long. Here
"extract": {
"histogram-bin": {
"--group": {"rgb": ["rgbsv", "rgbtv"], "fluo": ["fluosv"]},
"--chunks": {"rgb": 5, "fluo": 6},
"--chunks": {"rgb": [5, 5, 5], "fluo": [0, 9, 10]},
"--channels": {"rgb": [0, 1, 2], "fluo": [1, 2]}
},
"workflows": {
"rgbsv": {
"inputs": ["final"],
"arguments": {
"--dimfromroi": "pot_roi",
"--dimensions": "",
"--pixels": "",
"--moments": ""
......@@ -737,7 +735,7 @@ are defined for each image type -- as a result the template is quite long. Here
"fluosv": {
"inputs": ["final"],
"arguments": {
"--dimensions": "",
"--dimfromroi": "/work/walia/common/workflows/0184/input/fluosv_pot1.json",
"--pixels": "",
"--moments": ""
},
......@@ -748,6 +746,7 @@ are defined for each image type -- as a result the template is quite long. Here
}
Let's break it down into more manageable chunks. We will look at a few jobs from the workflows.
First, note that all workflow definitions must be under the "workflows" key. Secondly,
note that the names of the workflows i.e. in this case "fluosv", must match the image types
......@@ -840,14 +839,14 @@ all the numeric information you want to extract from your final images. Let's t
"extract": {
"histogram-bin": {
"--group": {"rgb": ["rgbsv", "rgbtv"], "fluo": ["fluosv"]},
"--chunks": {"rgb": 5, "fluo": 6},
"--chunks": {"rgb": [5, 5, 5], "fluo": [0, 9, 10]},
"--channels": {"rgb": [0, 1, 2], "fluo": [1, 2]}
},
"workflows": {
"rgbsv": {
"inputs": ["final"],
"arguments": {
"--dimensions": "",
"--dimfromroi": "pot_roi",
"--pixels": "",
"--moments": ""
},
......@@ -865,7 +864,7 @@ all the numeric information you want to extract from your final images. Let's t
"fluosv": {
"inputs": ["final"],
"arguments": {
"--dimensions": "",
"--dimfromroi": "/work/walia/common/workflows/0184/input/fluosv_pot1.json",
"--pixels": "",
"--moments": ""
},
......@@ -885,10 +884,11 @@ first define "--group". This allows us to group one or more imtypes from separa
workflows together. In this case, we group "rgbsv" and "rgbtv" together. This
allows us to consider color information from the RGB spectrum as a whole.
We name this group "rgb" -- group names are important for future "histogram-bin"
definitions. Next is "--chunks". For each group, you simply define how many
definitions. Next is "--chunks". For each group AND channel, you define how many
pieces you want the total calculated histogram to be broken up into. Remember
that color-space is 3 dimensional -- this means that specifying chunks = 5
will generate :math:`5^{3} = 125` bins. Finally, we define what channels to
that color-space is 3 dimensional, and the order is [B, G, R]. Here, we break up
rgb images into 5 chunks for each channel (for a total of 125 bins), and we break up
fluorescence images only by green and red channels. Finally, we define what channels to
process for each group. Channel 0 corresponds to blue, channel 1 corresponds
to green, and channel 2 corresponds to red. In this case, we leave channel 0
out of fluorescence processing -- this is because a majority of the blue values
......@@ -901,7 +901,14 @@ can locate the image you want to extract data from (it does not necessarily have
final image, though it generally is). In the arguments section, you define a list of the
numeric information you want to extract. For an example of all arguments you can specify,
look at the ih-extract script. If you specify "histogram-bin" for a particular
imtype, the "--colors" options is added automatically.
imtype, the "--colors" option is added automatically. Additionally, there are
multiple ways to extract dimensions from an image. If you pass the "--dimensions"
argument to the image, the dimensions are simply calculated as the height and width
of the final processed images. Alternatively, you can extract dimensions with reference
to a region of interest by using the "--dimfromroi" argument. The "--dimfromroi" argument
can either be a path to an absolute roi file (as seen in the fluorescence workflow)
or it can be an intermediate output roi from the actual processing workflow
(as seen in the rgbsv workflow).
Finally, to actually generate your workflow, use the ih-run command. Run this
command from your top level folder that was generated by the ih-setup command.
......
......@@ -79,7 +79,7 @@ Here are the first few lines of the crawl.json file:
...
The data set located in /stash/project/@RicePhenomics/public/tinySet is publicly available,
so you shouldn't need to adjust this file at all! To setup a project directoy and load images run:
so you shouldn't need to adjust this file at all! To setup a project directory and load images run:
.. code-block:: bash
......@@ -96,6 +96,8 @@ setup.
Configuration
-------------
The configuration for osg submissions is very different, and you must additionally
generate an ih distribution, and ssh keys to transfer your files.
Here is the adjusted configuration file:
.. code-block:: javascript
......@@ -111,6 +113,7 @@ Here is the adjusted configuration file:
"universe": "vanilla",
"requirements": "OSGVO_OS_STRING == \"RHEL 6\" &amp;&amp; HAS_FILE_usr_lib64_libstdc___so_6 &amp;&amp; CVMFS_oasis_opensciencegrid_org_REVISION >= 3590",
"+WantsStashCache": "True",
"+ProjectName": "YOUR_OSG_PROJECT_NAME"
}
},
"osg": {
......@@ -123,18 +126,24 @@ Here is the adjusted configuration file:
"images": 2
},
"notify": {
"email": "avi@kurtknecht.com",
"email": "YOUR_EMAIL",
"pegasus_home": "/usr/share/pegasus/"
}
}
Most of the information is the same as the configuration file from the previous
workflow example, but there are a few differences. First, the condor configuration
workflow example, but there are a few differences. First, there is a version definition,
which should match the version of your ih tarball (Currently 1.0). Next, the condor configuration
should have the "requirements" and "+WantsStashCache" definitions, and they should
match as above. The "+ProjectName" definition is if you have a group on the OSG.
Next, there is a version definition, which should match the version of your ih
tar. To create a tarball distribution, navigate to your ih install, and run:
match as above. The "+ProjectName" definition is if you have a group on the OSG,
and it should match your group name. Finally, there is an "osg" specific key.
This requires a path to an ih tarball distribution, as well as a path to your
ssh private key.
Creating the ih Distribution
=============================
To create a tarball distribution, navigate to your ih install, and run:
.. code-block:: python
......@@ -160,6 +169,8 @@ full path to this file into the "tarball" definition.
# re-tar our folder
tar -zcvf ih-1.0.tar.gz ih-1.0
Creating ssh Keys
==================
Next, for data staging and transferring files to work successfully, you need to create an ssh key pair
for your workflow. Run the following:
......@@ -170,15 +181,16 @@ for your workflow. Run the following:
This will generate an ssh key pair. You will be prompted for a name and a
password. When prompted for the password, hit enter to leave the password blank.
After this finishes, there should be two files created in your ~/.ssh/ folder,
a private key file and public key file with the name that you gave. Append the
contents of the public key file to your authorized_keys files (create it if
necessary).
a private key file and public key file with the name that you gave. The public
key is simply the file that ends with ".pub". Append the contents of the public
key file to your ~/.ssh/authorized_keys file. If there is not a ~/.ssh/authorized_keys
file, then create it and append the contents of your public key to it.
.. code-block:: bash
cat ~/.ssh/KEY_NAME.pub >> ~/.ssh/authorized_keys
Make sure you provide the full path to the private key for the "ssh" definition.
Make sure you provide the full path to the PRIVATE key for the "ssh" definition.
Now you are ready to generate and submit your workflow! Make sure you cd
to your top level folder (awesome_project) and then run:
......
......@@ -4,7 +4,7 @@
{
"name": "pot_filter_1",
"executable": "ih-color-filter",
"inputs": ["base", "/Volumes/BLOO/Work/hcc/plantcv/new/workflow1/input/fluosv_pot1.json"],
"inputs": ["base", "/work/walia/common/workflows/0184/input/fluosv_pot1.json"],
"outputs": ["pot1"],
"arguments": {
"--logic": "(((((((r - g) < 30) and (((r + g) + b) < 110)) or ((((r + g) + b) > 110) and ((r - g) < 50))) or (((r - g) < 25) and ((g - r) < 25))) or (g > 60)) not 1)"
......@@ -13,7 +13,7 @@
{
"name": "pot_filter_2",
"executable": "ih-color-filter",
"inputs": ["pot1", "/Volumes/BLOO/Work/hcc/plantcv/new/workflow1/input/fluosv_pot2.json"],
"inputs": ["pot1", "/work/walia/common/workflows/0184/input/fluosv_pot2.json"],
"outputs": ["pot2"],
"arguments": {
"--logic": "(((r + g) + b) > 120)"
......@@ -33,7 +33,7 @@
{
"name": "crop",
"executable": "ih-crop",
"inputs": ["filter", "/Volumes/BLOO/Work/hcc/plantcv/new/workflow1/input/fluosv_edge.json"],
"inputs": ["filter", "/work/walia/common/workflows/0184/input/fluosv_edge.json"],
"outputs": ["edged"],
"arguments": {},
"depends": ["main_filter"]
......@@ -44,7 +44,6 @@
"inputs": ["edged", "edged"],
"outputs": ["final"],
"arguments": {
"--resize": "",
"--basemin": 75
},
"depends": ["crop"]
......@@ -96,7 +95,7 @@
{
"name": "crop",
"executable": "ih-crop",
"inputs": ["morphed", "/Volumes/BLOO/Work/hcc/plantcv/new/workflow1/input/rgbtv_edge.json"],
"inputs": ["morphed", "/work/walia/common/workflows/0184/input/rgbtv_edge.json"],
"outputs": ["edged"],
"arguments": {},
"depends": ["closing"]
......@@ -115,8 +114,7 @@
"inputs": ["recolor", "recolor"],
"outputs": ["final"],
"arguments": {
"--basemin": 200,
"--resize": ""
"--basemin": 200
},
"depends": ["reconstitute"]
}
......@@ -239,7 +237,7 @@
{
"name": "gfilter1",
"executable": "ih-color-filter",
"inputs": ["box_filtered", "/Volumes/BLOO/Work/hcc/plantcv/new/workflow1/input/rgbsv_gray1.json"],
"inputs": ["box_filtered", "/work/walia/common/workflows/0184/input/rgbsv_gray1.json"],
"outputs": ["gray_filtered1"],
"arguments": {
"--logic": "((((b max g) max r) - ((b min g) min r)) > 50)"
......@@ -249,7 +247,7 @@
{
"name": "gfilter2",
"executable": "ih-color-filter",
"inputs": ["gray_filtered1", "/Volumes/BLOO/Work/hcc/plantcv/new/workflow1/input/rgbsv_gray2.json"],
"inputs": ["gray_filtered1", "/work/walia/common/workflows/0184/input/rgbsv_gray2.json"],
"outputs": ["gray_filtered2"],
"arguments": {
"--logic": "((((b max g) max r) - ((b min g) min r)) > 100)"
......@@ -259,7 +257,7 @@
{
"name": "crop",
"executable": "ih-crop",
"inputs": ["gray_filtered2", "/Volumes/BLOO/Work/hcc/plantcv/new/workflow1/input/rgbsv_edge.json"],
"inputs": ["gray_filtered2", "/work/walia/common/workflows/0184/input/rgbsv_edge.json"],
"outputs": ["edged"],
"arguments": {},
"depends": ["gfilter2"]
......@@ -278,8 +276,7 @@
"inputs": ["recolor2", "recolor2"],
"outputs": ["final"],
"arguments": {
"--basemin": 50,
"--resize": ""
"--basemin": 50
},
"depends": ["reconstitute2"]
}
......@@ -298,7 +295,7 @@
"rgbsv": {
"inputs": ["final"],
"arguments": {
"--dimensions": "",
"--dimfromroi": "pot_roi",
"--pixels": "",
"--moments": ""
},
......@@ -316,7 +313,7 @@
"fluosv": {
"inputs": ["final"],
"arguments": {
"--dimensions": "",
"--dimfromroi": "/work/walia/common/workflows/0184/input/fluosv_pot1.json",
"--pixels": "",
"--moments": ""
},
......
......@@ -337,6 +337,9 @@ class Image(object):
return np.array(merged, dtype=np.int32)
def drawContours(self):
"""
A helper function that draws all detected contours in the image onto the image.
"""
if self._isColor():
binary = cv2.inRange(self.image.copy(), np.array([1, 1, 1], np.uint8), np.array([255, 255, 255], np.uint8))
else:
......@@ -463,6 +466,17 @@ class Image(object):
return
def addWeighted(self, image, weight1, weight2):
"""
:param image: The image to add.
:type image: str or np.ndarray
:param weight1: The weight to apply to the current image.
:type weight1: float
:param weight2: The weight to apply to the additional image.
:type weight2: float
This function adds an additional image to the current based on the provided
weights. Both positive and negative weights can be used.
"""
self.image = cv2.addWeighted(self.image, weight1, self._loadResource(image)[1], weight2, 0)
return
......@@ -543,7 +557,7 @@ class Image(object):
def rotateColor(self, color):
"""
:param color: Color shift to perform
:param color: Color shift to perform. Should be [b, g, r].
:type color: list
Shifts the entire color of the image based on the values in
......@@ -864,6 +878,17 @@ class Image(object):
return
def contourChop(self, binary, basemin = 100):
"""
:param binary: The binary image to find contours of.
:type binary: str or np.ndarray
:param basemin: The minimum area a contour must have to be considered part of the foreground.
:type basemin: int
This function works very similarly to the :py:meth:`~ih.imgproc.Image.contourCut`
function, except that this function does not crop the image, but removes
all contours that fall below the threshold.
"""
bname, binary = self._loadResource(binary)
if self._isColor(binary):
binary = cv2.cvtColor(binary, cv2.COLOR_BGR2GRAY)
......@@ -874,6 +899,12 @@ class Image(object):
return
def getBounds(self):
"""
:return: The bounding box of the image.
:rtype: list
This function finds the bounding box of all contours in the image, and
returns a list of the form [miny, maxy, minx, maxx]
"""
binary = self.image.copy()
if self._isColor(binary):
binary = cv2.cvtColor(binary, cv2.COLOR_BGR2GRAY)
......@@ -898,7 +929,7 @@ class Image(object):
"""
:param binary: The binary image to find contours of.
:type binary: str or np.ndarray
:param basemin: The minimum area a contour must have to be consider part of the foreground.
:param basemin: The minimum area a contour must have to be considered part of the foreground.
:type basemin: int
:param padding: Padding added to all sides of the final roi.
:type padding: int
......@@ -1119,6 +1150,19 @@ class Image(object):
return moments
def extractDimsFromROI(self, roi):
"""
:param roi: The roi to calculate height from.
:type roi: list or roi file
:return: A list corresponding to the calculated height and width of the image.
:rtype: list
Returns a list with the following form: [height, width]. This function differs
from the :py:meth:`~ih.imgproc.Image.extractDimensions` in the way that height
is calculated. Rather than calculating the total height of the image,
the height is calculated from the top of the given ROI.
"""
pot = self._loadROI(roi)
plant = self.getBounds()
height = plant[0] - pot[0]
......@@ -1257,7 +1301,8 @@ class Image(object):
def extractColorChannels(self):
"""
This function is similar to the
This function extracts the total number of pixels of each color value
for each channel.
"""
b, g, r = cv2.split(self.image)
bdata, gdata, rdata = [], [], []
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment