 from __future__ import print_function
-import sys
-import matplotlib
-matplotlib.use('Agg')
+
+import numpy as np
+from hyperopt import Trials, tpe
 
 from plasma.conf import conf
 from pprint import pprint
 pprint(conf)
-
-from plasma.primitives.shots import Shot, ShotList
-import os
-
-from hyperopt import Trials, tpe
-from hyperopt import STATUS_OK
-from hyperas import optim
-
-
-def data_wrapper(conf):
-    from plasma.models.loader import Loader
-    from plasma.preprocessor.normalize import VarNormalizer as Normalizer
-    import numpy as np
-    import theano
-    from keras.utils.generic_utils import Progbar
-    from keras import backend as K
-
-    from plasma.models.runner import make_predictions_and_evaluate_gpu
-    from hyperas.distributions import choice, uniform, conditional
-
-    from plasma.models.builder import ModelBuilder, LossHistory
-
-    from keras.models import Sequential
-    from keras.layers.core import Dense, Activation, Dropout
-    from keras.layers.recurrent import LSTM, SimpleRNN
-    from keras.layers.wrappers import TimeDistributed
-    from keras.callbacks import Callback
-    from keras.optimizers import SGD, Adam, RMSprop, Nadam
-    from keras.regularizers import l1,l2,l1l2
-
-
-
-def model_wrapper(conf):
-
-    class HyperModelBuilder(ModelBuilder):
-        def build_model(self, predict, custom_batch_size=None):
-            conf = self.conf
-            model_conf = conf['model']
-            rnn_size = model_conf['rnn_size']
-            rnn_type = model_conf['rnn_type']
-            optimizer = model_conf['optimizer']
-            lr = model_conf['lr']
-            clipnorm = model_conf['clipnorm']
-            regularization = model_conf['regularization']
-
-            if optimizer == 'sgd':
-                optimizer_class = SGD
-            elif optimizer == 'adam':
-                optimizer_class = Adam
-            elif optimizer == 'rmsprop':
-                optimizer_class = RMSprop
-            elif optimizer == 'nadam':
-                optimizer_class = Nadam
-            else:
-                optimizer = optimizer
-
-            if lr is not None or clipnorm is not None:
-                optimizer = optimizer_class(lr=lr, clipnorm=clipnorm)
-
-            loss_fn = conf['data']['target'].loss
-            dropout_prob = model_conf['dropout_prob']
-            length = model_conf['length']
-            pred_length = model_conf['pred_length']
-            skip = model_conf['skip']
-            stateful = model_conf['stateful']
-            return_sequences = model_conf['return_sequences']
-            output_activation = conf['data']['target'].activation
-            num_signals = conf['data']['num_signals']
-
-            batch_size = self.conf['training']['batch_size']
-            if predict:
-                batch_size = self.conf['model']['pred_batch_size']
-                if return_sequences:
-                    length = pred_length
-                else:
-                    length = 1
-
-            if custom_batch_size is not None:
-                batch_size = custom_batch_size
-
-            if rnn_type == 'LSTM':
-                rnn_model = LSTM
-            elif rnn_type == 'SimpleRNN':
-                rnn_model = SimpleRNN
-            else:
-                print('Unkown Model Type, exiting.')
-                exit(1)
-
-            batch_input_shape = (batch_size, length, num_signals)
-            model = Sequential()
-            for _ in range(model_conf['rnn_layers']):
-                model.add(rnn_model(rnn_size, return_sequences=return_sequences, batch_input_shape=batch_input_shape,
-                                    stateful=stateful, W_regularizer=l2(regularization),
-                                    U_regularizer=l2(regularization),
-                                    b_regularizer=l2(regularization), dropout_W=dropout_prob, dropout_U=dropout_prob))
-            model.add(Dropout({{uniform(0, 1)}}))
-            if return_sequences:
-                model.add(TimeDistributed(Dense(1, activation=output_activation)))
-            else:
-                model.add(Dense(1, activation=output_activation))
-            model.compile(loss=loss_fn, optimizer=optimizer)
-            model.reset_states()
-            return model
-
-
-    nn = Normalizer(conf)
-    nn.train()
-    loader = Loader(conf,nn)
-    shot_list_train,shot_list_validate,shot_list_test = loader.load_shotlists(conf)
-
-
-    specific_builder = HyperModelBuilder(conf)
-    train_model, test_model = specific_builder.build_model(False), specific_builder.build_model(True)
-
-    np.random.seed(1)
-    validation_losses = []
-    validation_roc = []
-    training_losses = []
-    shot_list_train,shot_list_validate = shot_list_train.split_direct(1.0-conf['training']['validation_frac'],do_shuffle=True)
-    os.environ['THEANO_FLAGS'] = 'device=gpu,floatX=float32'
-
-    num_epochs = conf['training']['num_epochs']
-    num_at_once = conf['training']['num_shots_at_once']
-    lr_decay = conf['model']['lr_decay']
-    lr = conf['model']['lr']
-
-    resulting_dict = {'loss':None,'status':STATUS_OK,'model':None}
-
-    e = -1
-    while e < num_epochs-1:
-        e += 1
-        pbar = Progbar(len(shot_list_train))
-
-        shot_list_train.shuffle()
-        shot_sublists = shot_list_train.sublists(num_at_once)[:1]
-        training_losses_tmp = []
-
-        K.set_value(train_model.optimizer.lr, lr*lr_decay**(e))
-        for (i,shot_sublist) in enumerate(shot_sublists):
-            X_list,y_list = loader.load_as_X_y_list(shot_sublist)
-            for j,(X,y) in enumerate(zip(X_list,y_list)):
-                history = LossHistory()
-                train_model.fit(X,y,
-                                batch_size=Loader.get_batch_size(conf['training']['batch_size'],prediction_mode=False),
-                                nb_epoch=1,shuffle=False,verbose=0,
-                                validation_split=0.0,callbacks=[history])
-                train_model.reset_states()
-                train_loss = np.mean(history.losses)
-                training_losses_tmp.append(train_loss)
-
-                pbar.add(1.0*len(shot_sublist)/len(X_list), values=[("train loss", train_loss)])
-                loader.verbose=False
-                sys.stdout.flush()
-        training_losses.append(np.mean(training_losses_tmp))
-        specific_builder.save_model_weights(train_model,e)
-
-        roc_area,loss = make_predictions_and_evaluate_gpu(conf,shot_list_validate,loader)
-        validation_losses.append(loss)
-        validation_roc.append(roc_area)
-        resulting_dict['loss'] = loss
-        resulting_dict['model'] = train_model
-
-    return resulting_dict
-
-best_run, best_model = optim.minimize(model=model_wrapper,data=data_wrapper,algo=tpe.suggest,max_evals=2,trials=Trials())
+#from plasma.primitives.shots import Shot, ShotList
+from plasma.preprocessor.normalize import Normalizer
+from plasma.models.loader import Loader
+#from plasma.models.runner import train, make_predictions,make_predictions_gpu
+
+if conf['data']['normalizer'] == 'minmax':
+    from plasma.preprocessor.normalize import MinMaxNormalizer as Normalizer
+elif conf['data']['normalizer'] == 'meanvar':
+    from plasma.preprocessor.normalize import MeanVarNormalizer as Normalizer
+elif conf['data']['normalizer'] == 'var':
+    from plasma.preprocessor.normalize import VarNormalizer as Normalizer  # performs much better than MinMaxNormalizer
+elif conf['data']['normalizer'] == 'averagevar':
+    from plasma.preprocessor.normalize import AveragingVarNormalizer as Normalizer  # performs much better than MinMaxNormalizer
+else:
+    print('unknown normalizer, exiting')
+    exit(1)
+
+np.random.seed(1)
+
+print("normalization",end='')
+nn = Normalizer(conf)
+nn.train()
+loader = Loader(conf,nn)
+shot_list_train,shot_list_validate,shot_list_test = loader.load_shotlists(conf)
+print("...done")
+
+print('Training on {} shots, testing on {} shots'.format(len(shot_list_train),len(shot_list_test)))
+from plasma.models import runner
+
+specific_runner = runner.HyperRunner(conf,loader,shot_list_train)
+
+best_run, best_model = specific_runner.frnn_minimize(algo=tpe.suggest,max_evals=2,trials=Trials())
 print (best_run)
 print (best_model)
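
The new script delegates the hyperparameter search to plasma.models.runner.HyperRunner. The deleted code shows the pattern the runner presumably wraps: hyperas expects a data function and a model function whose source contains {{...}} templates (here the Dropout({{uniform(0, 1)}}) layer) and whose return value is a dict with 'loss', 'status', and 'model' keys; hyperas.optim.minimize then rewrites and re-runs that source once per TPE trial. Below is a minimal sketch of what frnn_minimize might look like, assuming it simply forwards to optim.minimize with module-level wrappers equivalent to the deleted data_wrapper/model_wrapper. The constructor and method signatures are taken from the call sites in the diff; the body is illustrative, not the actual implementation.

from hyperas import optim

class HyperRunner(object):
    def __init__(self, conf, loader, shot_list_train):
        self.conf = conf
        self.loader = loader
        self.shot_list_train = shot_list_train

    def frnn_minimize(self, algo, max_evals, trials):
        # hyperas re-executes the *source* of the model/data functions for
        # every trial, substituting each {{...}} template with a sampled
        # value, so it needs plain module-level functions it can inspect
        # (like the removed data_wrapper/model_wrapper), not bound methods.
        return optim.minimize(model=model_wrapper, data=data_wrapper,
                              algo=algo, max_evals=max_evals, trials=trials)

As in the new script, best_run holds the winning hyperparameter assignment and best_model the corresponding trained Keras model.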