Commit 782084ea authored by Marius Kriegerowski's avatar Marius Kriegerowski

some refactor

parent db84919f
......@@ -10,15 +10,15 @@ class Optimizer(Object):
path_best = String.T(default='winner')
def evaluate(self, model):
    """Run one training pass of *model* and return its loss.

    Used as the objective function handed to the optimizer: smaller
    return values are better.
    """
    # loss = model(**)
    training_result = model.train()
    return training_result['loss']
def optimize(self):
    """Minimize the training loss over hyper-parameters with ``gp_minimize``.

    Starts the Gaussian-process search from the last configured learning
    rate; ``self.evaluate`` is the objective called per trial.
    """
    # NOTE(review): the scraped diff contained both the old multi-line and
    # the new one-line gp_minimize call; only one call is intended.
    default_parameters = [
        self.learning_rate[-1]
    ]
    gp_minimize(func=self.evaluate, x0=default_parameters)
def log_dir_name(self, learning_rate):
......
from .data import *
from .tf_util import *
......@@ -25,7 +26,8 @@ class Model(Object):
shuffle_size = Int.T(
optional=True, help='if set, shuffle examples at given buffer size.')
def __call__(self, tf_config=None, debug=False):
def __init__(self, tf_config=None, debug=False, **kwargs):
super().__init__(**kwargs)
if self.auto_clear and os.path.exists(self.summary_outdir):
logger.info('deleting directory: %s' % self.summary_outdir)
......@@ -33,19 +35,16 @@ class Model(Object):
logger.info('deleting directory: %s' % self.outdir)
shutil.rmtree(self.outdir)
self.tf_config = tf_config
self.debug = debug
self.sess = tf.Session(config=tf_config)
with self.sess as default:
loss = self.train()
print('lofff %s' % loss)
def generate_input(self):
    """Build the tf.data input pipeline and return the next-element op.

    Pulls a dataset from ``self.data_generator``, batches it by
    ``self.batch_size``, optionally shuffles it when ``self.shuffle_size``
    is set, prefetches one batch ahead, and returns the tensors produced
    by the one-shot iterator's ``get_next()``.
    """
    # NOTE(review): the diff showed the active ``dataset.repeat()`` being
    # replaced by its commented-out form; this emits the post-commit state
    # (no repetition), matching the estimator-driven train/evaluate flow.
    dataset = self.data_generator.get_dataset()
    dataset = dataset.batch(self.batch_size)
    if self.shuffle_size:
        dataset = dataset.shuffle(buffer_size=self.shuffle_size)
    # dataset = dataset.repeat()
    dataset = dataset.prefetch(buffer_size=self.batch_size)
    return dataset.make_one_shot_iterator().get_next()
......@@ -72,7 +71,7 @@ class Model(Object):
input = tf.layers.conv2d(
inputs=input,
filters=n_filters, # dimensionality of output space *N of filters
filters=n_filters,
kernel_size=(cross_channel_kernel, kernel_width), # use identity (1) along channels
activation=tf.nn.relu,
bias_initializer=initializer,
......@@ -81,7 +80,7 @@ class Model(Object):
input = tf.layers.batch_normalization(input, training=training)
input = tf.layers.max_pooling2d(
input,
pool_size=(2, 2), # pool_size (height, width)
pool_size=(2, 2), # (height, width)
strides=(1, 2)
)
......@@ -142,7 +141,8 @@ class Model(Object):
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(
learning_rate=params.get('learning_rate', 1e-4))
train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
train_op = optimizer.minimize(
loss=loss, global_step=tf.train.get_global_step())
tf.summary.scalar('loss', loss)
return tf.estimator.EstimatorSpec(
......@@ -164,12 +164,13 @@ class Model(Object):
def train(self, params=None):
    """Train the estimator and return its evaluation result.

    Parameters
    ----------
    params : dict, optional
        Hyper-parameters forwarded to the estimator's ``model_fn``
        (e.g. ``learning_rate``). Defaults to an empty dict.

    Returns
    -------
    dict
        The metrics dict produced by ``est.evaluate`` (one step).
    """
    # NOTE(review): the diff interleaved the old and new bodies; this is
    # the post-commit version, which runs inside ``self.sess`` and returns
    # the evaluation metrics instead of the raw train() return value.
    params = params or {}
    with self.sess as default:
        est = tf.estimator.Estimator(
            model_fn=self.model, model_dir=self.outdir, params=params)
        est.train(input_fn=self.generate_input)
        result = est.evaluate(input_fn=self.generate_input, steps=1)
        return result
def main():
......@@ -237,10 +238,10 @@ def main():
default_store_id='vogtland_001')
data_generator = OnTheFlyData(fn_stations='stations.pf', gf_engine=gf_engine)
model = Model(data_generator=data_generator)
model = Model(tf_config=tf_config, data_generator=data_generator)
model.regularize()
model.dump(filename=fn_config)
print('created a fresh "%s"' % fn_config)
if args.train:
model(tf_config)
model.train()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment