sec2.1_ml / deeplearning · Commits · b4885bf1

Commit b4885bf1, authored Jul 22, 2018 by Marius Kriegerowski
improve performance
parent 6172e455

Showing 2 changed files with 25 additions and 24 deletions (+25 -24)
pinky/README.md     +1  -0
pinky/src/model.py  +24 -24
pinky/README.md — view file @ b4885bf1

@@ -2,6 +2,7 @@ Prereqs
 -------
 - tensorflow
 - scikit-optimize
+- git clone git@github.com:HerrMuellerluedenscheid/swarming.git

 Invoke
...
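The prerequisites above are bare package names; a quick, hedged way to check a working environment. The import names other than `tensorflow` are assumptions: scikit-optimize installs as `skopt`, and `swarming` presumes the cloned repo was made importable (e.g. with `pip install -e .`).

# Sanity check for the README prerequisites (module names partly assumed).
import importlib

for name in ('tensorflow', 'skopt', 'swarming'):
    try:
        importlib.import_module(name)
        print('%-12s OK' % name)
    except ImportError as exc:
        print('%-12s MISSING (%s)' % (name, exc))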
pinky/src/model.py — view file @ b4885bf1
@@ -25,7 +25,7 @@ class Model(Object):
     shuffle_size = Int.T(
         optional=True, help='if set, shuffle examples at given buffer size.')

-    def __call__(self, tf_config=None):
+    def __call__(self, tf_config=None, debug=False):
         if self.auto_clear and os.path.exists(self.summary_outdir):
             logger.info('deleting directory: %s' % self.summary_outdir)
...
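With the widened signature, the expensive debug summaries stay off by default. A minimal usage sketch, assuming a configured `Model` instance named `model`; the session options are illustrative, not from this module:

# Hypothetical call sites for the new __call__ signature.
import tensorflow as tf

tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True  # illustrative session option

model(tf_config=tf_config)              # normal run: debug defaults to False
model(tf_config=tf_config, debug=True)  # opt in to the costly image summaries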
@@ -33,9 +33,12 @@ class Model(Object):
             logger.info('deleting directory: %s' % self.outdir)
             shutil.rmtree(self.outdir)

+        self.debug = debug
+
         self.sess = tf.Session(config=tf_config)

         with self.sess as default:
-            self.train()
+            loss = self.train()
+            print('lofff %s' % loss)

     def generate_input(self):
         dataset = self.data_generator.get_dataset()
...
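One caveat for the `loss = self.train()` / `print(...)` pair: in the TF 1.x Estimator API, `Estimator.train` returns the estimator itself (for chaining), not a loss value, so unless `Model.train` computes a loss separately the printed object is the estimator. A hedged sketch of how a final loss is usually read back, as a fragment inside `Model.train`:

# Sketch only: est.train(...) returns est in TF 1.x, not a metric.
# A scalar loss is normally obtained from evaluate(); generate_input is
# this module's input_fn.
est = tf.estimator.Estimator(model_fn=self.model, model_dir=self.outdir)
est.train(input_fn=self.generate_input)
metrics = est.evaluate(input_fn=self.generate_input)
loss = metrics['loss']  # EstimatorSpec populates 'loss' in EVAL mode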
@@ -70,35 +73,36 @@ class Model(Object):
input
=
tf
.
layers
.
conv2d
(
inputs
=
input
,
filters
=
n_filters
,
# dimensionality of output space *N of filters
# kernel_size=(n_channels, kernel_width), # use identity (1) along channels
kernel_size
=
(
cross_channel_kernel
,
kernel_width
),
# use identity (1) along channels
activation
=
tf
.
nn
.
relu
,
bias_initializer
=
initializer
,
name
=
name
)
input
=
tf
.
layers
.
batch_normalization
(
input
,
training
=
training
)
# extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
input
=
tf
.
layers
.
max_pooling2d
(
input
,
pool_size
=
(
2
,
2
),
# pool_size (height, width)
strides
=
(
1
,
2
)
# pool_size=(1, 2), # pool_size (height, width)
# strides=(1, 2)
)
tf
.
summary
.
image
(
'post-%s'
%
name
,
tf
.
split
(
input
,
num_or_size_splits
=
n_filters
,
axis
=-
1
)[
0
])
variable_summaries
(
input
,
name
)
if
self
.
debug
:
# super expensive!!
tf
.
summary
.
image
(
'post-%s'
%
name
,
tf
.
split
(
input
,
num_or_size_splits
=
n_filters
,
axis
=-
1
)[
0
])
variable_summaries
(
input
,
name
)
return
input
def
model
(
self
,
features
,
labels
,
mode
,
params
):
training
=
bool
(
mode
==
tf
.
estimator
.
ModeKeys
.
TRAIN
)
n_filters
=
32
n_channels
,
n_samples
=
self
.
data_generator
.
tensor_shape
features
=
tf
.
reshape
(
features
,
[
-
1
,
n_channels
,
n_samples
,
1
])
features
=
tf
.
reshape
(
features
,
[
-
1
,
n_channels
,
n_samples
,
1
])
tf
.
summary
.
image
(
'input'
,
features
)
#
tf.summary.image('input', features)
conv
=
self
.
time_axis_cnn
(
features
,
n_filters
,
1
,
name
=
'conv1'
,
training
=
training
)
conv
=
self
.
time_axis_cnn
(
conv
,
n_filters
*
2
,
1
,
name
=
'conv2'
,
...
...
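The performance gain in this hunk comes from not building per-layer image summaries on normal runs: `tf.summary.image` forces an image encode at every summary step. A small, self-contained sketch of the same flag-gating pattern (the function and kernel size here are illustrative, not the module's `time_axis_cnn`):

import tensorflow as tf

def conv_block(x, n_filters, name, debug=False):
    """Illustrative flag-gated summaries, mirroring the diff above."""
    x = tf.layers.conv2d(x, filters=n_filters, kernel_size=(1, 3),
                         activation=tf.nn.relu, name=name)
    if debug:
        # Image summaries cost an encode per step, so make them opt-in.
        tf.summary.image('post-%s' % name,
                         tf.split(x, num_or_size_splits=n_filters, axis=-1)[0])
    return x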
@@ -117,15 +121,11 @@ class Model(Object):
predictions
=
tf
.
layers
.
dense
(
fc
,
self
.
data_generator
.
n_classes
)
variable_summaries
(
predictions
,
'predictions'
)
# labels = tf.Print(labels, [labels], "Labels: ")
# predictions = tf.Print(predictions, [predictions], "Predictions: ")
# vector length
errors
=
predictions
-
labels
tf
.
summary
.
scalar
(
'error_z_mean'
,
tf
.
reduce_mean
(
errors
[
-
1
]))
tf
.
summary
.
scalar
(
'error_lateral_mean'
,
tf
.
reduce_mean
(
tf
.
sqrt
(
tf
.
reduce_sum
(
errors
[
0
:
1
]
**
2
,
keepdims
=
False
))))
tf
.
reduce_mean
(
tf
.
sqrt
(
tf
.
reduce_sum
(
errors
[
0
:
1
]
**
2
,
keepdims
=
False
))))
loss
=
tf
.
reduce_mean
(
tf
.
sqrt
(
tf
.
reduce_sum
((
predictions
-
labels
)
**
2
,
axis
=
1
,
keepdims
=
False
)))
loss
=
tf
.
Print
(
loss
,
[
loss
],
"Mean Euclidian Error [m]: "
)
...
...
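The loss is the mean Euclidean distance between predicted and true coordinates. A small NumPy check of the same `reduce_sum`/`sqrt`/`reduce_mean` arithmetic, with invented values:

import numpy as np

predictions = np.array([[0.0, 0.0, 10.0],
                        [3.0, 4.0,  0.0]])  # invented (x, y, z) estimates
labels = np.zeros((2, 3))                   # invented true coordinates

# Per-example Euclidean distance, then the batch mean.
dist = np.sqrt(np.sum((predictions - labels) ** 2, axis=1))  # [10.0, 5.0]
print(dist.mean())  # 7.5 -- the "Mean Euclidian Error [m]" printed above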
@@ -141,9 +141,8 @@ class Model(Object):
if
mode
==
tf
.
estimator
.
ModeKeys
.
TRAIN
:
optimizer
=
tf
.
train
.
AdamOptimizer
(
learning_rate
=
params
[
'learning_rate'
]
)
learning_rate
=
params
.
get
(
'learning_rate'
,
1e-4
)
)
train_op
=
optimizer
.
minimize
(
loss
=
loss
,
global_step
=
tf
.
train
.
get_global_step
())
# tf.summary.scalar('accuracy', accuracy[1])
tf
.
summary
.
scalar
(
'loss'
,
loss
)
return
tf
.
estimator
.
EstimatorSpec
(
...
...
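Switching from `params['learning_rate']` to `params.get('learning_rate', 1e-4)` makes the hyperparameter optional instead of a hard `KeyError`; the fallback matches the literal that `train()` previously hard-coded. A one-line check of the behavior:

params = {}
assert params.get('learning_rate', 1e-4) == 1e-4  # missing key -> default
params['learning_rate'] = 1e-3
assert params.get('learning_rate', 1e-4) == 1e-3  # explicit value wins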
@@ -162,14 +161,15 @@ class Model(Object):
summary_op
=
tf
.
summary
.
merge_all
()
)
def
train
(
self
):
params
=
{
'learning_rate'
:
1e-4
}
def
train
(
self
,
params
=
None
):
params
=
params
or
{}
est
=
tf
.
estimator
.
Estimator
(
model_fn
=
self
.
model
,
model_dir
=
self
.
outdir
,
params
=
params
)
# OR can we use this highlevel shit?
# est = tf.estimator.LinearRegressor()
est
.
train
(
input_fn
=
self
.
generate_input
)
loss
=
est
.
train
(
input_fn
=
self
.
generate_input
)
return
loss
def
main
():
...
...
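With `train` now accepting an optional `params` dict, hyperparameters can be injected by the caller, which fits the scikit-optimize search the README lists as a prerequisite. A hedged usage sketch, again assuming a configured `Model` instance named `model`:

# Hypothetical call sites for the new train(params=...) signature.
model.train()                                # falls back to the 1e-4 default
model.train(params={'learning_rate': 1e-3})  # caller-supplied hyperparameter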