
Commit cd20621

Julian Kates-Harbeck committed
Added extra parameter extra_dense_input, controlling whether we want an extra dense layer before the RNN
1 parent 7b2d8b6 commit cd20621

File tree

1 file changed: +5 −4 lines


examples/conf.yaml

Lines changed: 5 additions & 4 deletions
@@ -79,18 +79,19 @@ model:
     rnn_type: 'LSTM'
     #TODO optimize
     rnn_layers: 2
-    num_conv_filters: 10
+    num_conv_filters: 128
     size_conv_filters: 3
     num_conv_layers: 3
     pool_size: 2
-    dense_size: 200
+    dense_size: 128
+    extra_dense_input: False
     #have not found a difference yet
     optimizer: 'adam'
     clipnorm: 10.0
     regularization: 0.0
     dense_regularization: 0.01
     #1e-4 is too high, 5e-7 is too low. 5e-5 seems best at 256 batch size, full dataset and ~10 epochs, and lr decay of 0.90. 1e-4 also works well if we decay a lot (i.e ~0.7 or more)
-    lr: 0.00001 #0.00001 #0.0005 #for adam plots 0.0000001 #0.00005 #0.00005 #0.00005
+    lr: 0.00002 #0.00001 #0.0005 #for adam plots 0.0000001 #0.00005 #0.00005 #0.00005
     lr_decay: 0.97 #0.98 #0.9
     stateful: True
     return_sequences: True
@@ -109,7 +110,7 @@ training:
     max_patch_length: 100000
     #How many shots are we loading at once?
     num_shots_at_once: 200
-    num_epochs: 400
+    num_epochs: 1000
     use_mock_data: False
     data_parallel: False
     hyperparam_tuning: False
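
The new extra_dense_input flag, per the commit message, toggles an extra dense layer placed before the RNN. A minimal sketch of how such a flag might be consumed at model-build time, assuming a Keras Sequential stack; build_model, n_features, and the layer sizes here are illustrative placeholders, not taken from this commit:

    # Sketch only: a hypothetical build step gated by extra_dense_input.
    from keras.models import Sequential
    from keras.layers import Dense, LSTM, TimeDistributed

    def build_model(conf, n_features=14):  # n_features is a placeholder
        model = Sequential()
        if conf['model']['extra_dense_input']:
            # Project every timestep through a Dense layer before the RNN.
            model.add(TimeDistributed(
                Dense(conf['model']['dense_size'], activation='relu'),
                input_shape=(None, n_features)))
            model.add(LSTM(128, return_sequences=True))
        else:
            model.add(LSTM(128, return_sequences=True,
                           input_shape=(None, n_features)))
        model.add(TimeDistributed(Dense(1, activation='sigmoid')))
        return model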

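On the learning-rate bump: with lr: 0.00002 and lr_decay: 0.97, and assuming the usual per-epoch exponential schedule lr * lr_decay**epoch (an assumption; this commit only touches the config file), the effective rate shrinks quickly, which is worth keeping in mind alongside the jump to num_epochs: 1000:

    # Assumed schedule form lr * decay**epoch; not confirmed by this commit.
    lr, decay = 2e-5, 0.97
    for epoch in (0, 10, 100, 400):
        print(epoch, lr * decay ** epoch)
    # 0 -> 2e-05, 10 -> ~1.5e-05, 100 -> ~9.5e-07, 400 -> ~1.0e-10

Under this assumed schedule, the later epochs of a 1000-epoch run would see an essentially frozen learning rate.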