ECE UNL Image and Signal Analysis Lab / Cross-Domain-Identification / Commits

Commit 943e79b6 ("Update main.py")
Authored Aug 19, 2020 by cedricnimpa
Parent: d8a5e520
No related tags or merge requests found.

Changes: 1 changed file
main.py: 6 additions, 1065 deletions

main.py (view file @ 943e79b6)
import tensorflow as tf
from keras_preprocessing import image
from keras_applications.vgg16 import VGG16
from keras_applications.vgg16 import preprocess_input as vgg_preprocess_input
from keras_applications.resnet50 import ResNet50
from keras_applications.resnet import preprocess_input as resnet_preprocess_input
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from cyvlfeat.sift import dsift
from regression_models import *
from RST import *
from extract_features import *
from loss import *
from processingImages import *
from data import *
from experiment import *
import numpy as np
import skimage
import skimage.io       # explicit submodule imports: skimage.io/skimage.filters are used below
import skimage.filters
import os
import argparse
def dogfilterimage(image, sigma0=1.0, sigma1=6.0):
    """
    Difference of Gaussian image filtering
    """
    if len(image.shape) == 3:
        image = image[:, :, 0]
    s0 = skimage.filters.gaussian(image, sigma0)
    s1 = skimage.filters.gaussian(image, sigma1)
    dog = s0 - s1
    return dog
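# Illustrative usage sketch for the DoG filter above (the random array is a
# made-up stand-in; real inputs are the registered face images loaded later in
# this file). It only shows that the band-pass result keeps the input size.
def _demo_dogfilterimage():
    rng = np.random.RandomState(0)
    fake = rng.rand(360, 280)                     # same H x W the loader below assumes
    dog = dogfilterimage(fake, sigma0=1.0, sigma1=6.0)
    # coarse blur minus fine blur: low-to-mid spatial frequencies remain
    assert dog.shape == fake.shape
    return dog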
def cropimage(image, crop):
    """
    Crop Image
    """
    y0 = crop[1]
    y1 = crop[3]
    x0 = crop[0]
    x1 = crop[2]
    return image[y0:y1, x0:x1]
def getfilenames(filename):
    """
    returns list of filenames
    """
    filenames = [line.rstrip('\n').split() for line in open(filename)]
    return filenames
def load_images(filenames, dogfilter, crop, test):
    """
    load_images: reads images into a numpy array
    Arguments:
        dogfilter: boolean flag indicating if DoG filtering is used
        crop: array that specifies cropping bounds
              (upper left corner x, upper left corner y, lower right corner x, lower right corner y).
              If no cropping, then set to None.
        test: Flag used to indicate testing mode. More specifically, during test mode no augmentation is applied.
              When test is False, then image augmentation is used.
    """
    n = len(filenames)  # number of images
    h = 360  # image height
    w = 280  # image width
    # change height and width if cropping bounds are specified
    if crop is not None:
        h = crop[3] - crop[1]
        w = crop[2] - crop[0]
    # if test flag is False, then use image augmentation
    # otherwise, no image augmentation is applied
    if test is False:
        images = np.zeros((2 * n, h, w))
    else:
        images = np.zeros((n, h, w))
    for i, f in enumerate(filenames):
        # read image
        image = skimage.io.imread(f)
        # if training, do augmentation (blur image)
        if test is False:
            image_ = skimage.filters.gaussian(image, 1.0)
        # apply DoG filtering
        if dogfilter is True:
            if test is False:
                image_ = dogfilterimage(image_)
            image = dogfilterimage(image)
        # crop image
        if crop is not None:
            if test is False:
                image_ = cropimage(image_, crop)
            image = cropimage(image, crop)
        # normalize image between [0, 255]
        if len(image.shape) == 3:
            if test is False:
                images[2 * i, :, :] = (image[:, :, 1] - image[:, :, 1].min()) / (image[:, :, 1].max() - image[:, :, 1].min()) * 255.0
                images[2 * i + 1, :, :] = (image_[:, :, 1] - image_[:, :, 1].min()) / (image_[:, :, 1].max() - image_[:, :, 1].min()) * 255.0
            else:
                images[i, :, :] = (image[:, :, 1] - image[:, :, 1].min()) / (image[:, :, 1].max() - image[:, :, 1].min()) * 255.0
        else:
            if test is False:
                images[2 * i, :, :] = (image - image.min()) / (image.max() - image.min()) * 255.0
                images[2 * i + 1, :, :] = (image_ - image_.min()) / (image_.max() - image_.min()) * 255.0
            else:
                if image.min() == image.max():
                    print('{}) {}'.format(i, os.path.basename(f)), end="\n")
                    exit()
                else:
                    images[i, :, :] = (image - image.min()) / (image.max() - image.min()) * 255.0
    # replicate grayscale images (Red, Green, and Blue channels identical)
    images = np.expand_dims(images, axis=3)
    images = np.tile(images, (1, 1, 1, 3))
    return images
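# Note on the load_images output (added for clarity): the returned array has
# shape (n, h, w, 3) in test mode and (2*n, h, w, 3) in training mode (each
# file contributes the original plus a Gaussian-blurred copy), with the single
# grayscale channel replicated three times so the ImageNet-pretrained backbones
# below can consume it.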
def extract_resnet50_feature(img_data, model):
    """
    Use pre-trained ResNet50 model to extract image features
    """
    img_data = resnet_preprocess_input(img_data, backend=tf.keras.backend, layers=tf.keras.layers,
                                       models=tf.keras.models, utils=tf.keras.utils)
    resnet50_feature = model.predict(img_data)
    shape = resnet50_feature.shape
    if len(shape) == 4:
        keypts = np.array([[x, y] for y in range(shape[1]) for x in range(shape[2])]).reshape(shape[1], shape[2], 2)
        keypts = np.tile(keypts, [shape[0], 1, 1, 1])
    else:
        keypts = None
    return resnet50_feature, keypts
def extract_vgg16_feature(img_data, model):
    """
    Use pre-trained VGG16 model to extract image features
    """
    img_data = vgg_preprocess_input(img_data, backend=tf.keras.backend, layers=tf.keras.layers,
                                    models=tf.keras.models, utils=tf.keras.utils)
    vgg16_feature = model.predict(img_data)
    shape = vgg16_feature.shape
    if len(shape) == 4:
        keypts = np.array([[x, y] for y in range(shape[1]) for x in range(shape[2])]).reshape(shape[1], shape[2], 2)
        keypts = np.tile(keypts, [shape[0], 1, 1, 1])
    else:
        keypts = None
    return vgg16_feature, keypts
def extract_dsift(images, step=8, size=5):
    """
    extract Dense SIFT (DSIFT) image features
    """
    n = images.shape[0]
    if n > 0:
        d, f = dsift(images[0, :, :, 0], step=step, size=size)
        ndescr, descrl = f.shape
        feats = np.zeros((n, ndescr, descrl))
        keypts = np.zeros((n, ndescr, 2))
    for i in range(n):
        d, f = dsift(images[i, :, :, 0], step=step, size=size)
        feats[i, :, :] = f
        keypts[i, :, :] = d
    return feats, keypts
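# Note (added for clarity): the code above treats the first value returned by
# cyvlfeat's dsift as the keypoint locations (stored in keypts, shape
# (n, ndescr, 2)) and the second as the dense SIFT descriptors (stored in
# feats, shape (n, ndescr, descrl)), sampled every `step` pixels over each
# grayscale frame.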
def get_image_patches(image, step, size):
    """
    extract image patches
    """
    h, w = image.shape
    # upper left coordinate for patches
    xi = np.arange(0, w - size + 1, step)
    yi = np.arange(0, h - size + 1, step)
    # determine remaining size of image
    xrem = w - (xi[-1] + size)
    yrem = h - (yi[-1] + size)
    # center patch coverage
    xi = xi + int(np.floor(xrem / 2))
    yi = yi + int(np.floor(yrem / 2))
    ndescr = len(xi) * len(yi)
    keypts = np.zeros((ndescr, 2))
    descrs = np.zeros((ndescr, size * size))
    k = 0
    for x in xi:
        for y in yi:
            keypts[k, 0] = x
            keypts[k, 1] = y
            # patch bounds use `size` (originally hard-coded to 20, which only matched the default size=20)
            descrs[k, :] = cropimage(image, [x, y, x + size, y + size]).reshape((1, size * size))
            k = k + 1
    return keypts, descrs
def extract_patches(images, step=8, size=20):
    n = images.shape[0]
    if n > 0:
        d, f = get_image_patches(images[0, :, :, 0], step=step, size=size)
        ndescr, descrl = f.shape
        feats = np.zeros((n, ndescr, descrl))
        keypts = np.zeros((n, ndescr, 2))
    for i in range(n):
        d, f = get_image_patches(images[i, :, :, 0], step=step, size=size)
        feats[i, :, :] = f
        keypts[i, :, :] = d
    return feats, keypts
def pca_reduce(feats, n_components, trans=None, scaler=None):
    """
    feature selection via Principal Components Analysis (PCA)
    """
    x = feats
    # PCA expects data to be scaled
    if scaler is None:
        scaler = StandardScaler()
        scaler.fit(x)
    x = scaler.transform(x)
    # apply PCA transformation
    if trans is None:
        trans = PCA(n_components=n_components, svd_solver='full')
        trans.fit(x)
    x = trans.transform(x)
    # return trans and scaler for use with test data
    return x, trans, scaler
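# Illustrative usage sketch of the fit/transform split above (synthetic
# features only): the scaler and PCA basis fitted on training features are
# passed back in so test features are projected with the same transformation,
# mirroring how run_experiment() calls this function further down.
def _demo_pca_reduce():
    rng = np.random.RandomState(0)
    train_feats = rng.rand(200, 512)
    test_feats = rng.rand(50, 512)
    train_red, trans, scaler = pca_reduce(train_feats, n_components=64)
    test_red, _, _ = pca_reduce(test_feats, n_components=64, trans=trans, scaler=scaler)
    return train_red.shape, test_red.shape      # (200, 64), (50, 64)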
def protocol1_range1_baseline(imgroot, proto):
    filenames = getfilenames(proto)
    visfiles = np.array(filenames)[:, 0].tolist()
    thmfiles = np.array(filenames)[:, 1].tolist()
    visfiles = [os.path.join(imgroot, 'Registeredvisible', v) for v in visfiles]
    thmfiles = [os.path.join(imgroot, 'Registeredthermal', v) for v in thmfiles]
    return visfiles, thmfiles
def protocol1_range1_expr(imgroot, proto):
    filenames = getfilenames(proto)
    visfiles = np.array(filenames)[:, 0].tolist()
    thmfiles = np.array(filenames)[:, 1].tolist()
    ids = np.array([t[1:5] for t in thmfiles])
    unique_ids = ids[::4]  # there are 4 baseline images per subject
    thmfiles = ['A' + id + '_R1_E_IOD083_S0_Sample{:02d}'.format(samp) + '_01.png'
                for id in unique_ids for samp in range(1, 13)]
    visfiles = [os.path.join(imgroot, 'Registeredvisible', v) for v in visfiles]
    thmfiles = [os.path.join(imgroot, 'Registeredthermal', v) for v in thmfiles]
    return visfiles, thmfiles
def protocol1_range2_baseline(imgroot, proto):
    filenames = getfilenames(proto)
    visfiles = np.array(filenames)[:, 0].tolist()
    thmfiles = np.array(filenames)[:, 1].tolist()
    ids = np.array([t[1:5] for t in thmfiles])
    unique_ids = ids[::4]  # there are 4 baseline images per subject
    thmfiles = ['A' + id + '_R2_B_IOD050_S0_Sample{:02d}'.format(samp) + '_01.png'
                for id in unique_ids for samp in range(1, 5)]
    visfiles = [os.path.join(imgroot, 'Registeredvisible', v) for v in visfiles]
    thmfiles = [os.path.join(imgroot, 'Registeredthermal', v) for v in thmfiles]
    return visfiles, thmfiles
def protocol1_range2_expr(imgroot, proto):
    filenames = getfilenames(proto)
    visfiles = np.array(filenames)[:, 0].tolist()
    thmfiles = np.array(filenames)[:, 1].tolist()
    ids = np.array([t[1:5] for t in thmfiles])
    unique_ids = ids[::4]  # there are 4 baseline images per subject
    thmfiles = ['A' + id + '_R2_E_IOD050_S0_Sample{:02d}'.format(samp) + '_01.png'
                for id in unique_ids for samp in range(1, 13)]
    visfiles = [os.path.join(imgroot, 'Registeredvisible', v) for v in visfiles]
    thmfiles = [os.path.join(imgroot, 'Registeredthermal', v) for v in thmfiles]
    return visfiles, thmfiles
def protocol1_range3_baseline(imgroot, proto):
    filenames = getfilenames(proto)
    visfiles = np.array(filenames)[:, 0].tolist()
    thmfiles = np.array(filenames)[:, 1].tolist()
    ids = np.array([t[1:5] for t in thmfiles])
    unique_ids = ids[::4]  # there are 4 baseline images per subject
    thmfiles = ['A' + id + '_R3_B_IOD031_S0_Sample{:02d}'.format(samp) + '_01.png'
                for id in unique_ids for samp in range(1, 5)]
    visfiles = [os.path.join(imgroot, 'Registeredvisible', v) for v in visfiles]
    thmfiles = [os.path.join(imgroot, 'Registeredthermal', v) for v in thmfiles]
    return visfiles, thmfiles
def protocol1_range3_expr(imgroot, proto):
    filenames = getfilenames(proto)
    visfiles = np.array(filenames)[:, 0].tolist()
    thmfiles = np.array(filenames)[:, 1].tolist()
    ids = np.array([t[1:5] for t in thmfiles])
    unique_ids = ids[::4]  # there are 4 baseline images per subject
    thmfiles = ['A' + id + '_R3_E_IOD031_S0_Sample{:02d}'.format(samp) + '_01.png'
                for id in unique_ids for samp in range(1, 13)]
    visfiles = [os.path.join(imgroot, 'Registeredvisible', v) for v in visfiles]
    thmfiles = [os.path.join(imgroot, 'Registeredthermal', v) for v in thmfiles]
    return visfiles, thmfiles
def protocol3(imgroot, proto_vis, proto_thm):
    filenames_vis = getfilenames(proto_vis)
    filenames_thm = getfilenames(proto_thm)
    visfiles = np.array(filenames_vis)[:, 0].tolist()
    thmfiles = np.array(filenames_thm)[:, 0].tolist()
    visfiles = [os.path.join(imgroot, 'Registeredvisible', v) for v in visfiles]
    thmfiles = [os.path.join(imgroot, 'Registeredthermal', v) for v in thmfiles]
    return visfiles, thmfiles
def read_integers(filename):
    with open(filename) as f:
        return np.asarray([int(x) for x in f])
def eval(gallery, probe, imgs_per_subj=4, num_subjs=30, num_test_imgs=120):
    """
    Evaluate using Cosine Similarity Confusion matrix
    """
    # cosine similarity
    probe = probe / np.expand_dims(np.sqrt(np.sum(probe ** 2, axis=1)), axis=1)
    gallery = gallery / np.expand_dims(np.sqrt(np.sum(gallery ** 2, axis=1)), axis=1)
    C = np.matmul(gallery, probe.T)
    C = np.reshape(C.T, (-1, imgs_per_subj))
    C = np.amax(C, axis=1).reshape(num_subjs, num_test_imgs)
    # save C as numpy file using protocol and splits
    # np.save(outFile, C)
    pred = np.argmax(C, axis=0).reshape(num_test_imgs, 1)
    gt = np.tile(np.expand_dims(np.arange(num_subjs), axis=1), (int(num_test_imgs / num_subjs), 1))
    acc = np.sum(np.array(pred == gt, dtype='float')) / float(num_test_imgs)
    return acc, C
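# Illustrative usage sketch of the evaluation above (random stand-in features,
# not real gallery/probe data). Shapes follow the function's own assumptions:
# the gallery holds imgs_per_subj images per subject and probes are grouped by
# subject; no claim is made here about the numeric accuracy value itself.
def _demo_eval():
    rng = np.random.RandomState(0)
    num_subjs, imgs_per_subj, num_probes, dim = 3, 2, 3, 16
    gallery = rng.rand(num_subjs * imgs_per_subj, dim)
    probe = rng.rand(num_probes, dim)
    acc, C = eval(gallery, probe, imgs_per_subj=imgs_per_subj,
                  num_subjs=num_subjs, num_test_imgs=num_probes)
    return acc, C.shape     # scalar rank-1 accuracy and the (num_subjs, num_probes) score matrix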
def subsample_p3_train(protofile, labelfile):
    # read protocol file
    fnames0 = [line.split()[0] for line in open(protofile, 'r')]
    fnames1 = [line.split()[1] for line in open(protofile, 'r')]
    labels = [line.split()[0] for line in open(labelfile, 'r')]
    assert len(fnames0) == len(labels), 'protocol file and label file must be the same length'
    # get ids
    ids = [f[:19] for f in fnames0]
    fnames0_ = []
    label_indices = []
    # for every unique id use up to 2 baseline, 12 expression, and 12 pose
    for id in np.unique(ids):
        cb = 0
        ce = 0
        cp = 0
        for index, f in enumerate(fnames0):
            if '_b_' in f:
                if id in f and cb < 2:
                    fnames0_.append(f)
                    label_indices.append(index)
                    cb += 1
            elif '_e_' in f:
                if id in f and ce < 12:
                    fnames0_.append(f)
                    label_indices.append(index)
                    ce += 1
            elif '_p_' in f:
                if id in f and cp < 12:
                    fnames0_.append(f)
                    label_indices.append(index)
                    cp += 1
    # new labels
    labels = np.array(labels)
    labels_ = labels[np.array(label_indices, dtype=int)].tolist()
    # get ids
    ids = [f[:19] for f in fnames1]
    fnames1_ = []
    # for every unique id use up to 2 baseline, 12 expression, and 12 pose
    for id in np.unique(ids):
        cb = 0
        ce = 0
        cp = 0
        for f in fnames1:
            if '_b_' in f:
                if id in f and cb < 2:
                    fnames1_.append(f)
                    cb += 1
            elif '_e_' in f:
                if id in f and ce < 12:
                    fnames1_.append(f)
                    ce += 1
            elif '_p_' in f:
                if id in f and cp < 12:
                    fnames1_.append(f)
                    cp += 1
    return fnames0_, fnames1_, labels_
def inference(y):
    """
    transfer layer 1
    """
    transfer1 = tf.keras.layers.Conv2D(200, (3, 3), activation='tanh', padding='same', name='transfer1')
    """
    transfer layer 2
    """
    transfer2 = tf.keras.layers.Conv2D(200, (3, 3), activation='tanh', padding='same', name='transfer2')
    """
    bottleneck 2
    """
    bottleneck2 = tf.keras.layers.Conv2D(66, (1, 1), activation='tanh', padding='same', name='bottleneck2')
    """
    Transfer feature maps of size B x 25 x 25 x 256
    """
    fy = transfer1(y)
    fy = transfer2(fy)
    fy = bottleneck2(fy) + y
    var_list1 = transfer1.trainable_variables + transfer2.trainable_variables + bottleneck2.trainable_variables
    return fy, var_list1
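# Shape sketch for the transfer block above (illustrative only, assuming the
# TF 1.x graph mode used throughout this file): the block is residual, so its
# output keeps the input's spatial size and 66 channels, which is what lets
# bottleneck2(fy) + y type-check against the 25 x 25 x (64+2) placeholders
# built in run_experiment().
def _demo_inference_shapes():
    y = tf.placeholder(dtype=tf.float32, shape=(None, 25, 25, 66))
    fy, var_list1 = inference(y)
    return fy.get_shape().as_list(), len(var_list1)   # [None, 25, 25, 66], 6 variables (kernel + bias per layer)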
def loss(feats0, feats1, labels, labels_, loss_flag, var_list0, var_list1):
    """
    Add L2Loss to all the trainable variables.
    Add summary for "Loss" and "Loss/avg".
    Args:
        logits: Logits from inference().
        labels: Labels from distorted_inputs or inputs(). 1-D tensor of shape [batch_size]
    Returns:
        Loss tensor of type float.
    """
    if loss_flag is False:
        feats0 = tf.keras.layers.Flatten()(feats0)
        feats1 = tf.keras.layers.Flatten()(feats1)
        feats0_flat = feats0
        feats1_flat = feats1
        NC = 96
        wd = None
        # feats0 = tf.keras.layers.Dropout(0.3)(feats0)  # dropout
        labels = tf.cast(labels, tf.int32)
        dim0 = feats0.get_shape()[1].value
        with tf.variable_scope('softmax_linear') as scope:
            dense = tf.keras.layers.Dense(NC, activation=None, name='softmax_linear')
        # note: wd is None above, so this weight-decay branch (which refers to weights0) is never taken
        if wd is not None:
            weight_decay = tf.multiply(tf.nn.l2_loss(weights0), wd, name='weight_loss')
        logits00 = dense(feats0)  # visible (0) -> visible (0)
        """
        Use same classifier on thermal features
        """
        logits10 = dense(feats1)  # thermal (1) -> visible (0)
        var_list0 = var_list0 + dense.trainable_variables  # only update weights here
        var_list1 = var_list1
        cross_entropy00 = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits00,
                                                                         name='cross_entropy_per_example00')
        cross_entropy_mean00 = tf.reduce_mean(cross_entropy00, name='cross_entropy00')
        cross_entropy_mean00 = tf.multiply(7.5e-1, cross_entropy_mean00, name='weighted_cross_entropy00')
        cross_entropy10 = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits10,
                                                                         name='cross_entropy_per_example10')
        cross_entropy_mean10 = tf.reduce_mean(cross_entropy10, name='cross_entropy10')
        cross_entropy_mean10 = tf.multiply(7.5e-1, cross_entropy_mean10, name='weighted_cross_entropy10')
        if wd is not None:
            losses = [cross_entropy_mean00 + tf.reduce_mean(weight_decay), cross_entropy_mean10]
        else:
            losses = [cross_entropy_mean00, cross_entropy_mean10]
        logits = [logits00, logits10]
        var_lists = [var_list0, var_list1]
    else:
        print("New cross domain identity loss")
        feats0 = tf.keras.layers.Flatten()(feats0)
        feats1 = tf.keras.layers.Flatten()(feats1)
        feats0_flat = feats0
        feats1_flat = feats1
        NC = 96
        wd = None
        labels = tf.cast(labels, tf.int32)
        dim0 = feats0.get_shape()[1].value
        with tf.variable_scope('softmax_linear') as scope:
            dense = tf.keras.layers.Dense(NC, activation=None, name='softmax_linear')
        # note: wd is None above, so this weight-decay branch (which refers to weights0) is never taken
        if wd is not None:
            weight_decay = tf.multiply(tf.nn.l2_loss(weights0), wd, name='weight_loss')
        logits00 = dense(feats0)  # visible (0) -> visible (0)
        """
        Use same classifier on thermal features
        """
        logits10 = dense(feats1)  # thermal (1) -> visible (0)
        with tf.variable_scope('softmax_linear1') as scope:
            dense_ = tf.keras.layers.Dense(2, activation=None, name='softmax_linear1')
        logits00_ = dense_(feats0)  # visible (0) -> visible (0)
        """
        Use same classifier on thermal features
        """
        logits10_ = dense_(feats1)  # thermal (1) -> visible (0)
        var_list0 = var_list0 + dense.trainable_variables + dense_.trainable_variables  # only update weights here
        var_list1 = var_list1 + dense_.trainable_variables
        cross_entropy00 = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits00,
                                                                         name='cross_entropy_per_example00')
        cross_entropy_mean00 = tf.reduce_mean(cross_entropy00, name='cross_entropy00')
        cross_entropy_mean00 = tf.multiply(7.5e-1, cross_entropy_mean00, name='weighted_cross_entropy00')
        cross_entropy10 = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits10,
                                                                         name='cross_entropy_per_example10')
        cross_entropy_mean10 = tf.reduce_mean(cross_entropy10, name='cross_entropy10')
        cross_entropy_mean10 = tf.multiply(7.5e-1, cross_entropy_mean10, name='weighted_cross_entropy10')
        cross_entropy00_ = tf.nn.softmax_cross_entropy_with_logits(labels=labels_, logits=logits00_,
                                                                   name='cross_entropy_per_example00_')
        cross_entropy_mean00_ = .25 * tf.reduce_mean(cross_entropy00_, name='cross_entropy00_')
        cross_entropy10_ = tf.nn.softmax_cross_entropy_with_logits(labels=labels_, logits=logits10_,
                                                                   name='cross_entropy_per_example10_')
        cross_entropy_mean10_ = .25 * tf.reduce_mean(cross_entropy10_, name='cross_entropy10_')
        if wd is not None:
            losses = [cross_entropy_mean00 + cross_entropy_mean00_ + tf.reduce_mean(weight_decay),
                      cross_entropy_mean10 + cross_entropy_mean10_]
        else:
            losses = [cross_entropy_mean00 + cross_entropy_mean00_, cross_entropy_mean10 + cross_entropy_mean10_]
        logits = [logits00, logits10]
        var_lists = [var_list0, var_list1]
    return losses, logits, var_lists, feats0_flat, feats1_flat
def save_best_model(i, model_type, sess, saver):
    if i == 0:
        if model_type == 'resnet50':
            saver.save(sess, './saved_models/p2_v2t/resnet/00/my_model_res_00')
        elif model_type == 'vgg16':
            saver.save(sess, './saved_models/p2_v2t/vgg/00/my_model_res_00')
    elif i == 1:
        if model_type == 'resnet50':
            saver.save(sess, './saved_models/p2_v2t/resnet/01/my_model_res_01')
        elif model_type == 'vgg16':
            saver.save(sess, './saved_models/p2_v2t/vgg/01/my_model_res_01')
    elif i == 2:
        if model_type == 'resnet50':
            saver.save(sess, './saved_models/p2_v2t/resnet/02/my_model_res_02')
        elif model_type == 'vgg16':
            saver.save(sess, './saved_models/p2_v2t/vgg/02/my_model_res_02')
    elif i == 3:
        if model_type == 'resnet50':
            saver.save(sess, './saved_models/p2_v2t/resnet/03/my_model_res_03')
        elif model_type == 'vgg16':
            saver.save(sess, './saved_models/p2_v2t/vgg/03/my_model_res_03')
    elif i == 4:
        if model_type == 'resnet50':
            saver.save(sess, './saved_models/p2_v2t/resnet/04/my_model_res_04')
        elif model_type == 'vgg16':
            saver.save(sess, './saved_models/p2_v2t/vgg/04/my_model_res_04')
def load_best_model(i, model_type):
    if i == 0:
        if model_type == 'resnet50':
            checkpoint_path = './saved_models/p2_v2t/resnet/00/'
        elif model_type == 'vgg16':
            checkpoint_path = './saved_models/p2_v2t/vgg/00/'
    elif i == 1:
        if model_type == 'resnet50':
            checkpoint_path = './saved_models/p2_v2t/resnet/01/'
        elif model_type == 'vgg16':
            checkpoint_path = './saved_models/p2_v2t/vgg/01/'
    elif i == 2:
        if model_type == 'resnet50':
            checkpoint_path = './saved_models/p2_v2t/resnet/02/'
        elif model_type == 'vgg16':
            checkpoint_path = './saved_models/p2_v2t/vgg/02/'
    elif i == 3:
        if model_type == 'resnet50':
            checkpoint_path = './saved_models/p2_v2t/resnet/03/'
        elif model_type == 'vgg16':
            checkpoint_path = './saved_models/p2_v2t/vgg/03/'
    elif i == 4:
        if model_type == 'resnet50':
            checkpoint_path = './saved_models/p2_v2t/resnet/04/'
        elif model_type == 'vgg16':
            checkpoint_path = './saved_models/p2_v2t/vgg/04/'
    return checkpoint_path
def get_protocols(i, protocol):
    protofile_, galleryProtofile_, probesProtofile_ = [], [], []
    # imgroot - location of the image files.
    # protofile - protocol files used for training and testing.
    if "protocol1" in protocol:
        imgroot = '/home/ironman/data/ARL/ARL_Polarimetric_Data/ARL_Polarimetric_Database_111Subject_Database_updated_2019/'
        protofile = '/home/ironman/data/ARL/code/Thm2Vis_DNN_Code/protocol1_ijcb/train/1_{:02d}.txt'.format(i)
        protofile_ = '/home/ironman/data/ARL/code/Thm2Vis_DNN_Code/protocol1_ijcb/test/1_{:02d}.txt'.format(i)
        label = '/home/ironman/data/ARL/code/Thm2Vis_DNN_Code/protocol1_ijcb/train_labels/1_{:02d}_labels.txt'.format(i)
    elif "protocol2" in protocol:
        imgroot = '/home/ironman/data/ARL/ARL_Polarimetric_Data/ARL_Polarimetric_Database_111Subject_Database_updated_2019/'
        protofile = '/home/ironman/data/ARL/code/Thm2Vis_DNN_Code/protocol2_ijcb/train/2_{:02d}.txt'.format(i)
        protofile_ = '/home/ironman/data/ARL/code/Thm2Vis_DNN_Code/protocol2_ijcb/test/2_{:02d}.txt'.format(i)
        label = '/home/ironman/data/ARL/code/Thm2Vis_DNN_Code/protocol2_ijcb/train_labels/2_{:02d}_labels.txt'.format(i)
    elif "protocol3_b" in protocol:
        imgroot = '/home/ironman/data/ARL/2018_May_OdinData/'
        protofile = '/home/ironman/data/ARL/code/Thm2Vis_DNN_Code/protocol3_ijcb/b_no_glasses/train/{:02d}_train.txt'.format(i)
        galleryProtofile_ = '/home/ironman/data/ARL/code/Thm2Vis_DNN_Code/protocol3_ijcb/b_no_glasses/test/{:02d}_gallery.txt'.format(i)
        probesProtofile_ = '/home/ironman/data/ARL/code/Thm2Vis_DNN_Code/protocol3_ijcb/b_no_glasses/test/{:02d}_probes.txt'.format(i)
        label = '/home/ironman/data/ARL/code/Thm2Vis_DNN_Code/protocol3_ijcb/b_no_glasses/train_labels/{:02d}_train_labels.txt'.format(i)
    elif "protocol3_e" in protocol:
        imgroot = '/home/ironman/data/ARL/2018_May_OdinData/'
        protofile = '/home/ironman/data/ARL/code/Thm2Vis_DNN_Code/protocol3_ijcb/e_no_glasses/train/{:02d}_train.txt'.format(i)
        galleryProtofile_ = '/home/ironman/data/ARL/code/Thm2Vis_DNN_Code/protocol3_ijcb/e_no_glasses/test/{:02d}_gallery.txt'.format(i)
        probesProtofile_ = '/home/ironman/data/ARL/code/Thm2Vis_DNN_Code/protocol3_ijcb/e_no_glasses/test/{:02d}_probes.txt'.format(i)
        label = '/home/ironman/data/ARL/code/Thm2Vis_DNN_Code/protocol3_ijcb/e_no_glasses/train_labels/{:02d}_train_labels.txt'.format(i)
    elif "protocol3_p" in protocol:
        imgroot = '/home/ironman/data/ARL/2018_May_OdinData/'
        protofile = '/home/ironman/data/ARL/code/Thm2Vis_DNN_Code/protocol3_ijcb/p_no_glasses/train/{:02d}_train.txt'.format(i)
        galleryProtofile_ = '/home/ironman/data/ARL/code/Thm2Vis_DNN_Code/protocol3_ijcb/p_no_glasses/test/{:02d}_gallery.txt'.format(i)
        probesProtofile_ = '/home/ironman/data/ARL/code/Thm2Vis_DNN_Code/protocol3_ijcb/p_no_glasses/test/{:02d}_probes.txt'.format(i)
        label = '/home/ironman/data/ARL/code/Thm2Vis_DNN_Code/protocol3_ijcb/p_no_glasses/train_labels/{:02d}_train_labels.txt'.format(i)
    return imgroot, protofile, protofile_, galleryProtofile_, probesProtofile_, label
def run_experiment(imgroot, model_type, layer_name=None, pooling=None, pca_flag=False, dogfilter=True,
                   crop=[39, 123, 239, 323], DPM=False, protocol=None, Proposed=False, loss_flag=False,
                   train_flag=False):
    # Get model
    if model_type == 'vgg16':
        model = VGG16(weights='imagenet', include_top=False, pooling=pooling, backend=tf.keras.backend,
                      layers=tf.keras.layers, models=tf.keras.models, utils=tf.keras.utils)
        model.trainable = False
        # model.summary()
    elif model_type == 'resnet50':
        model = ResNet50(weights='imagenet', include_top=False, pooling=pooling, backend=tf.keras.backend,
                         layers=tf.keras.layers, models=tf.keras.models, utils=tf.keras.utils)
        model.trainable = False
        # model.summary()
    if layer_name is not None:
        inter_model = tf.keras.Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
    acc = []   # Train accuracy
    acc_ = []  # Test accuracy
    count = 0
    for iter in range(5):
        # Get protocol files
        imgroot, protofile, protofile_, galleryProtofile_, probesProtofile_, label = get_protocols(iter, protocol)
        # Get training filenames
        if "protocol1" in protocol or "protocol2" in protocol:
            filenames = getfilenames(protofile)
            visfiles = np.array(filenames)[:, 0].tolist()
            thmfiles = np.array(filenames)[:, 1].tolist()
            visfiles = [os.path.join(imgroot, 'Registeredvisible', v) for v in visfiles]
            thmfiles = [os.path.join(imgroot, 'Registeredthermal', v) for v in thmfiles]
            labels_tr = read_integers(label)
        elif "protocol3" in protocol:
            visfiles, thmfiles, labels_tr = subsample_p3_train(protofile, label)
            labels_tr = np.asarray([int(x) for x in labels_tr])
            visfiles = [os.path.join(imgroot, 'Registeredvisible', v) for v in visfiles]
            thmfiles = [os.path.join(imgroot, 'Registeredthermal', v) for v in thmfiles]
        # Load training set
        vi = load_images(visfiles, dogfilter=dogfilter, crop=crop, test=True)
        s0 = load_images(thmfiles, dogfilter=dogfilter, crop=crop, test=True)
        ratio = vi.shape[0] / (s0.shape[0] + vi.shape[0])
        N = len(labels_tr)
        # Get testing filenames
        if protocol == 'protocol1_r1_b' or protocol == 'protocol2_r1_b':
            visfiles_, thmfiles_ = protocol1_range1_baseline(imgroot, protofile_)
        elif protocol == 'protocol1_r1_e' or protocol == 'protocol2_r1_e':
            visfiles_, thmfiles_ = protocol1_range1_expr(imgroot, protofile_)
        elif protocol == 'protocol1_r2_b' or protocol == 'protocol2_r2_b':
            visfiles_, thmfiles_ = protocol1_range2_baseline(imgroot, protofile_)
        elif protocol == 'protocol1_r2_e' or protocol == 'protocol2_r2_e':
            visfiles_, thmfiles_ = protocol1_range2_expr(imgroot, protofile_)
        elif protocol == 'protocol1_r3_b' or protocol == 'protocol2_r3_b':
            visfiles_, thmfiles_ = protocol1_range3_baseline(imgroot, protofile_)
        elif protocol == 'protocol1_r3_e' or protocol == 'protocol2_r3_e':
            visfiles_, thmfiles_ = protocol1_range3_expr(imgroot, protofile_)
        elif 'protocol3' in protocol:
            visfiles_, thmfiles_ = protocol3(imgroot, galleryProtofile_, probesProtofile_)
        # Load testing set
        vi_ = load_images(visfiles_, dogfilter=dogfilter, crop=crop, test=True)
        s0_ = load_images(thmfiles_, dogfilter=dogfilter, crop=crop, test=True)
        ratio_ = vi_.shape[0] / (s0_.shape[0] + vi_.shape[0])
        if model_type == 'dsift':
            feats_size = 24
            vif, vik = extract_dsift(vi)
            s0f, s0k = extract_dsift(s0)
            vif_, vik_ = extract_dsift(vi_)
            s0f_, s0k_ = extract_dsift(s0_)
        elif model_type == 'patch':
            vif, vik = extract_dsift(vi)
            s0f, s0k = extract_dsift(s0)
            vif_, vik_ = extract_dsift(vi_)
            s0f_, s0k_ = extract_dsift(s0_)
        elif model_type == 'vgg16':
            feats_size = 25
            if layer_name is None:
                vif, vik = extract_vgg16_feature(vi, model)
                s0f, s0k = extract_vgg16_feature(s0, model)
                vif_, vik_ = extract_vgg16_feature(vi_, model)
                s0f_, s0k_ = extract_vgg16_feature(s0_, model)
            else:
                vif, vik = extract_vgg16_feature(vi, inter_model)
                s0f, s0k = extract_vgg16_feature(s0, inter_model)
                vif_, vik_ = extract_vgg16_feature(vi_, inter_model)
                s0f_, s0k_ = extract_vgg16_feature(s0_, inter_model)
        elif model_type == 'resnet50':
            feats_size = 25
            if layer_name is None:
                vif, vik = extract_resnet50_feature(vi, model)
                s0f, s0k = extract_resnet50_feature(s0, model)
                vif_, vik_ = extract_resnet50_feature(vi_, model)
                s0f_, s0k_ = extract_resnet50_feature(s0_, model)
            else:
                vif, vik = extract_resnet50_feature(vi, inter_model)
                s0f, s0k = extract_resnet50_feature(s0, inter_model)
                vif_, vik_ = extract_resnet50_feature(vi_, inter_model)
                s0f_, s0k_ = extract_resnet50_feature(s0_, inter_model)
        # pca
        if pca_flag is True:
            shape = vif.shape
            # reshape training features
            vif = vif.reshape(-1, shape[-1])
            s0f = s0f.reshape(-1, shape[-1])
            feats = np.concatenate((vif, s0f), axis=0)
            feats, trans, scaler = pca_reduce(feats, n_components=64, trans=None, scaler=None)
            M = int(feats.shape[0] * ratio)
            vif = feats[:M, :]
            s0f = feats[M:, :]
            shape = vif_.shape
            # reshape testing features
            vif_ = vif_.reshape(-1, shape[-1])
            s0f_ = s0f_.reshape(-1, shape[-1])
            feats_ = np.concatenate((vif_, s0f_), axis=0)
            feats_, trans_, scaler_ = pca_reduce(feats_, n_components=64, trans=trans, scaler=scaler)
            M = int(feats_.shape[0] * ratio_)
            vif_ = feats_[:M, :]
            s0f_ = feats_[M:, :]
        # Deep Perceptual Mapping
        if DPM is True:
            # Run DPM on training data
            shape = vif.shape
            vif = vif.reshape(-1, shape[-1])
            s0f = s0f.reshape(-1, shape[-1])
            # reshape keypts
            if vik is not None:
                if s0k is not None:
                    vik = vik.reshape(-1, 2)
                    s0k = s0k.reshape(-1, 2)
                    # concat keypts
                    vif = np.concatenate((vif, vik), axis=1)
                    s0f = np.concatenate((s0f, s0k), axis=1)
            # min-max normalization
            vmin = np.expand_dims(np.amin(vif, axis=0), axis=1).T
            vmax = np.expand_dims(np.amax(vif, axis=0), axis=1).T
            smin = np.expand_dims(np.amin(s0f, axis=0), axis=1).T
            smax = np.expand_dims(np.amax(s0f, axis=0), axis=1).T
            vif = (vif - vmin) / (vmax - vmin) * 2.0 - 1.0
            s0f = (s0f - smin) / (smax - smin) * 2.0 - 1.0
            # train the dpm network
            nn = train_dpm(s0f, vif)
            # map features
            tmp = np.zeros_like(s0f)
            nn = nnff(nn, s0f, tmp)
            s0f = nn['a'][-1]
            # Run DPM on testing data
            shape = vif_.shape
            vif_ = vif_.reshape(-1, shape[-1])
            s0f_ = s0f_.reshape(-1, shape[-1])
            # reshape keypts
            if vik_ is not None:
                if s0k_ is not None:
                    vik_ = vik_.reshape(-1, 2)
                    s0k_ = s0k_.reshape(-1, 2)
                    # concat keypts
                    vif_ = np.concatenate((vif_, vik_), axis=1)
                    s0f_ = np.concatenate((s0f_, s0k_), axis=1)
            # min-max normalize
            vmin = np.expand_dims(np.amin(vif_, axis=0), axis=1).T
            vmax = np.expand_dims(np.amax(vif_, axis=0), axis=1).T
            smin = np.expand_dims(np.amin(s0f_, axis=0), axis=1).T
            smax = np.expand_dims(np.amax(s0f_, axis=0), axis=1).T
            vif_ = (vif_ - vmin) / (vmax - vmin) * 2.0 - 1.0
            s0f_ = (s0f_ - smin) / (smax - smin) * 2.0 - 1.0
            # map features
            tmp = np.zeros_like(s0f_)
            nn = nnff(nn, s0f_, tmp)
            s0f_ = nn['a'][-1]
            vif = vif.reshape(vi.shape[0], -1)
            s0f = s0f.reshape(s0.shape[0], -1)
            vif_ = vif_.reshape(vi_.shape[0], -1)
            s0f_ = s0f_.reshape(s0_.shape[0], -1)
            accu_, C = eval(vif_, s0f_, imgs_per_subj=4, num_subjs=30, num_test_imgs=s0_.shape[0])
            acc_.append(accu_)
        elif Proposed is True:
            # reshape
            shape = vif.shape
            vif = vif.reshape(-1, shape[-1])
            s0f = s0f.reshape(-1, shape[-1])
            # reshape keypts
            if vik is not None:
                if s0k is not None:
                    vik = vik.reshape(-1, 2)
                    s0k = s0k.reshape(-1, 2)
                    # concat keypts
                    vif = np.concatenate((vif, vik), axis=1)
                    s0f = np.concatenate((s0f, s0k), axis=1)
            # min-max normalization
            vmin = np.expand_dims(np.amin(vif, axis=0), axis=1).T
            vmax = np.expand_dims(np.amax(vif, axis=0), axis=1).T
            smin = np.expand_dims(np.amin(s0f, axis=0), axis=1).T
            smax = np.expand_dims(np.amax(s0f, axis=0), axis=1).T
            vif = (vif - vmin) / (vmax - vmin) * 2.0 - 1.0
            s0f = (s0f - smin) / (smax - smin) * 2.0 - 1.0
            # reshape
            shape = vif_.shape
            vif_ = vif_.reshape(-1, shape[-1])
            s0f_ = s0f_.reshape(-1, shape[-1])
            # reshape keypts
            if vik_ is not None:
                if s0k_ is not None:
                    vik_ = vik_.reshape(-1, 2)
                    s0k_ = s0k_.reshape(-1, 2)
                    # concat keypts
                    vif_ = np.concatenate((vif_, vik_), axis=1)
                    s0f_ = np.concatenate((s0f_, s0k_), axis=1)
            # min-max normalization
            vmin = np.expand_dims(np.amin(vif_, axis=0), axis=1).T
            vmax = np.expand_dims(np.amax(vif_, axis=0), axis=1).T
            smin = np.expand_dims(np.amin(s0f_, axis=0), axis=1).T
            smax = np.expand_dims(np.amax(s0f_, axis=0), axis=1).T
            vif_ = (vif_ - vmin) / (vmax - vmin) * 2.0 - 1.0
            s0f_ = (s0f_ - smin) / (smax - smin) * 2.0 - 1.0
            vif = vif.reshape((N, feats_size, feats_size, 66))
            s0f = s0f.reshape((N, feats_size, feats_size, 66))
            vif_ = vif_.reshape((len(vi_), feats_size, feats_size, 66))
            s0f_ = s0f_.reshape((len(s0_), feats_size, feats_size, 66))
            # training parameters
            batch_size = 16
            nsteps = 1500   # number of total steps
            nsteps_ = 100   # number of steps to train visible
            if model_type == 'dsift':
                """
                build graph
                """
                x = tf.placeholder(dtype=tf.float32, shape=(None, 24, 24, 64 + 2))
                y = tf.placeholder(dtype=tf.float32, shape=(None, 24, 24, 64 + 2))
                labels = tf.placeholder(tf.float32, shape=(None))
                labels_ = tf.placeholder(tf.float32, shape=(None, 2))
            else:
                """
                build graph
                """
                x = tf.placeholder(dtype=tf.float32, shape=(None, 25, 25, 64 + 2))
                y = tf.placeholder(dtype=tf.float32, shape=(None, 25, 25, 64 + 2))
                labels = tf.placeholder(tf.float32, shape=(None))
                labels_ = tf.placeholder(tf.float32, shape=(None, 2))
            fy, var_list1 = inference(y)
            losses, logits, var_lists, feats0_flat, feats1_flat = loss(x, fy, labels, labels_, loss_flag,
                                                                       var_list0=[], var_list1=var_list1)
            opt0 = tf.train.AdamOptimizer(1e-3)
            opt1 = tf.train.AdamOptimizer(1e-3)
            train_op0 = opt0.minimize(losses[0], var_list=var_lists[0])
            train_op1 = opt1.minimize(losses[1], var_list=var_lists[1])
            init_op = tf.initialize_variables(var_lists[0] + var_lists[1] + opt0.variables() + opt1.variables())
            # start session
            sess = tf.keras.backend.get_session()
            tf.keras.backend.set_session(sess)
            saver = tf.train.Saver()
            best = -1 * np.ones((5,))
            best_step = -1 * np.ones((5,))
            start = -1 * np.ones((5,))
            if train_flag is True:
                # train the network
                sess.run(init_op)
                print("count = ", count)
                for step in range(nsteps):
                    # evaluate
                    if step % 1 == 0:
                        # batch process gallery
                        M = vif_.shape[0]
                        num_test_batches = int(M / batch_size)
                        rem = M % batch_size
                        gallery = []
                        for i in range(num_test_batches):
                            gi = sess.run(fy, feed_dict={y: vif_[i * batch_size:(i + 1) * batch_size, :, :, :],
                                                         tf.keras.backend.learning_phase(): 0})
                            gallery.append(gi)
                        if rem > 0:
                            gi = sess.run(fy, feed_dict={y: vif_[num_test_batches * batch_size:num_test_batches * batch_size + rem, :, :, :],
                                                         tf.keras.backend.learning_phase(): 0})
                            gallery.append(gi)
                        gallery = np.concatenate(gallery, axis=0)
                        gallery = gallery.reshape(vif_.shape[0], -1)
                        # batch process probes
                        M = s0f_.shape[0]
                        num_test_batches = int(M / batch_size)
                        rem = M % batch_size
                        probe = []
                        for i in range(num_test_batches):
                            pi = sess.run(feats0_flat, feed_dict={x: s0f_[i * batch_size:(i + 1) * batch_size, :, :, :],
                                                                  tf.keras.backend.learning_phase(): 0})
                            probe.append(pi)
                        if rem > 0:
                            pi = sess.run(feats0_flat, feed_dict={x: s0f_[num_test_batches * batch_size:num_test_batches * batch_size + rem, :, :, :],
                                                                  tf.keras.backend.learning_phase(): 0})
                            probe.append(pi)
                        probe = np.concatenate(probe, axis=0)
                        probe = probe.reshape(s0f_.shape[0], -1)
                        # Generate cosine similarity filenames
                        if "protocol1" in protocol or "protocol2" in protocol:
                            cos_similarity = os.path.join('cos_sim_v2t_' + protocol + '_' + model_type + '_' + os.path.basename(protofile_)[:4] + '_prop' + '.npy')
                        elif "protocol3" in protocol:
                            cos_similarity = os.path.join('cos_sim_v2t_' + protocol + '_' + model_type + '_' + os.path.basename(galleryProtofile_)[:6] + '_prop' + '.npy')
                        acc_, C = eval(gallery, probe, imgs_per_subj=4, num_subjs=30, num_test_imgs=probe.shape[0])
                        if step == 0:
                            start[count] = acc_
                        # Save the best model
                        if acc_ > best[count]:
                            np.save(cos_similarity, C)
                            best[count] = acc_
                            best_step[count] = step
                            save_best_model(iter, model_type, sess, saver)
                        print('step = {}, acc_ = {}, init_acc = {}, best_acc = {}, best_step = {}'.format(step, acc_, start[count], best[count], best_step[count]))
                    # shuffle data
                    ii = np.random.permutation(N)   # N = length of the training set
                    vif = vif[ii, :, :, :]
                    s0f = s0f[ii, :, :, :]
                    labels_tr = labels_tr[ii]
                    vi_batch = vif[:batch_size, :, :, :]
                    s0_batch = s0f[:batch_size, :, :, :]
                    labels_batch = labels_tr[:batch_size]
                    labels_batch_ = 0.5 * np.ones((batch_size, 2))
                    if step < nsteps_:
                        _, L = sess.run([train_op0, losses], feed_dict={y: vi_batch, x: s0_batch, labels: labels_batch,
                                                                        labels_: labels_batch_,
                                                                        tf.keras.backend.learning_phase(): 1})
                    else:
                        _, L = sess.run([train_op1, losses], feed_dict={y: vi_batch, x: s0_batch, labels: labels_batch,
                                                                        labels_: labels_batch_,
                                                                        tf.keras.backend.learning_phase(): 1})
                    print('{} {} {}'.format(step, L[0], L[1]))
                acc_ = best[count]
                count = count + 1
            else:
                print("Loading pretrained network...")
                checkpoint_path = load_best_model(iter, model_type)
                saver.restore(sess, tf.train.latest_checkpoint(checkpoint_path))
                # batch process gallery
                M = vif_.shape[0]
                num_test_batches = int(M / batch_size)
                rem = M % batch_size
                gallery = []
                for i in range(num_test_batches):
                    gi = sess.run(fy, feed_dict={y: vif_[i * batch_size:(i + 1) * batch_size, :, :, :],
                                                 tf.keras.backend.learning_phase(): 0})
                    gallery.append(gi)
                if rem > 0:
                    gi = sess.run(fy, feed_dict={y: vif_[num_test_batches * batch_size:num_test_batches * batch_size + rem, :, :, :],
                                                 tf.keras.backend.learning_phase(): 0})
                    gallery.append(gi)
                gallery = np.concatenate(gallery, axis=0)
                gallery = gallery.reshape(vif_.shape[0], -1)
                # batch process probes
                M = s0f_.shape[0]
                num_test_batches = int(M / batch_size)
                rem = M % batch_size
                probe = []
                for i in range(num_test_batches):
                    pi = sess.run(feats0_flat, feed_dict={x: s0f_[i * batch_size:(i + 1) * batch_size, :, :, :],
                                                          tf.keras.backend.learning_phase(): 0})
                    probe.append(pi)
                if rem > 0:
                    pi = sess.run(feats0_flat, feed_dict={x: s0f_[num_test_batches * batch_size:num_test_batches * batch_size + rem, :, :, :],
                                                          tf.keras.backend.learning_phase(): 0})
                    probe.append(pi)
                probe = np.concatenate(probe, axis=0)
                probe = probe.reshape(s0f_.shape[0], -1)
                if "protocol1" in protocol or "protocol2" in protocol:
                    cos_similarity = os.path.join('cos_sim_v2t_' + protocol + '_' + model_type + '_' + os.path.basename(protofile_)[:4] + '_prop' + '.npy')
                elif "protocol3" in protocol:
                    cos_similarity = os.path.join('cos_sim_v2t_' + protocol + '_' + model_type + '_' + os.path.basename(galleryProtofile_)[:6] + '_prop' + '.npy')
                accu_, C = eval(gallery, probe, imgs_per_subj=4, num_subjs=30, num_test_imgs=probe.shape[0])
                np.save(cos_similarity, C)
                acc_.append(accu_)
    print("acc_ = ", acc_)
    # exit()
    return acc, acc_
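# Invocation sketch for run_experiment() (hypothetical arguments; the actual
# command-line flags are parsed in the truncated argparse block below, and
# 'activation_39' is only an illustrative intermediate-layer name):
#
#     acc, acc_ = run_experiment(imgroot=None,            # overwritten by get_protocols()
#                                model_type='resnet50',
#                                layer_name='activation_39',
#                                pooling=None,
#                                pca_flag=True,
#                                dogfilter=True,
#                                crop=[39, 123, 239, 323],
#                                protocol='protocol2_r1_b',
#                                Proposed=True,
#                                loss_flag=True,
#                                train_flag=True)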
if __name__ == '__main__':
    # command line argument parser
    parser = argparse.ArgumentParser()
    ...

This diff is collapsed in the GitLab view; the remainder of main.py is not shown here.