
Commit e14a678

Manually fix PEP 8 issues in torch_runner.py
1 parent 3f9ea6d commit e14a678

File tree

1 file changed: +65 −113 lines changed

plasma/models/torch_runner.py

Lines changed: 65 additions & 113 deletions
@@ -1,87 +1,41 @@
-import keras.callbacks as cbks
+from __future__ import print_function
 from keras.utils.generic_utils import Progbar
 from torch.nn.utils import weight_norm
 import torch.optim as opt
 from torch.autograd import Variable
 import torch.nn as nn
 import torch
-import hashlib
 from plasma.utils.downloading import makedirs_process_safe
-from plasma.utils.state_reset import reset_states
-from plasma.utils.evaluation import *
 from plasma.utils.performance import PerformanceAnalyzer
-from plasma.models.loader import Loader, ProcessGenerator
-from plasma.conf import conf
-from sklearn.neural_network import MLPClassifier
-from xgboost import XGBClassifier
-import pathos.multiprocessing as mp
+from plasma.utils.evaluation import get_loss_from_list
 from functools import partial
 import os
-import datetime
-import time
-import sys
 import numpy as np
-import matplotlib.pyplot as plt
-from __future__ import print_function
-import matplotlib
-matplotlib.use('Agg')
-
-if sys.version_info[0] < 3:
-    from itertools import imap
-
-# leading to import errors:
-#from hyperopt import hp, STATUS_OK
-#from hyperas.distributions import conditional
-
 
 model_filename = 'torch_model.pt'
 
 
 class FTCN(nn.Module):
-    def __init__(
-            self,
-            n_scalars,
-            n_profiles,
-            profile_size,
-            layer_sizes_spatial,
-            kernel_size_spatial,
-            linear_size,
-            output_size,
-            num_channels_tcn,
-            kernel_size_temporal,
-            dropout=0.1):
+    def __init__(self, n_scalars, n_profiles, profile_size,
+                 layer_sizes_spatial, kernel_size_spatial,
+                 linear_size, output_size,
+                 num_channels_tcn, kernel_size_temporal, dropout=0.1):
         super(FTCN, self).__init__()
-        self.lin = InputBlock(
-            n_scalars,
-            n_profiles,
-            profile_size,
-            layer_sizes_spatial,
-            kernel_size_spatial,
-            linear_size,
-            dropout)
+        self.lin = InputBlock(n_scalars, n_profiles, profile_size,
+                              layer_sizes_spatial, kernel_size_spatial,
+                              linear_size, dropout)
         self.input_layer = TimeDistributed(self.lin, batch_first=True)
-        self.tcn = TCN(
-            linear_size,
-            output_size,
-            num_channels_tcn,
-            kernel_size_temporal,
-            dropout)
+        self.tcn = TCN(linear_size, output_size, num_channels_tcn,
+                       kernel_size_temporal, dropout)
         self.model = nn.Sequential(self.input_layer, self.tcn)
 
     def forward(self, x):
         return self.model(x)
 
 
 class InputBlock(nn.Module):
-    def __init__(
-            self,
-            n_scalars,
-            n_profiles,
-            profile_size,
-            layer_sizes,
-            kernel_size,
-            linear_size,
-            dropout=0.2):
+    def __init__(self, n_scalars, n_profiles, profile_size, layer_sizes,
+                 kernel_size, linear_size, dropout=0.2):
         super(InputBlock, self).__init__()
         self.pooling_size = 2
         self.n_scalars = n_scalars
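FTCN wires InputBlock through a TimeDistributed wrapper (defined further down in this file) so the per-time-step feature extractor runs over a (batch, time, features) tensor. As a rough illustration of what such a wrapper typically does — a hypothetical sketch, not the repository's implementation — it folds the time axis into the batch axis:

```python
import torch
import torch.nn as nn


class TimeDistributedSketch(nn.Module):
    """Apply a module independently at every time step (hypothetical sketch)."""

    def __init__(self, module, batch_first=True):
        super(TimeDistributedSketch, self).__init__()
        self.module = module
        self.batch_first = batch_first

    def forward(self, x):
        # x: (batch, time, features) when batch_first=True
        b, t = x.shape[0], x.shape[1]
        folded = x.contiguous().view(b * t, *x.shape[2:])  # merge batch and time
        y = self.module(folded)
        return y.view(b, t, *y.shape[1:])                  # split them again


# Quick check with a linear layer standing in for InputBlock
wrapped = TimeDistributedSketch(nn.Linear(8, 4), batch_first=True)
out = wrapped(torch.randn(2, 5, 8))
print(out.shape)  # torch.Size([2, 5, 4])
```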
@@ -98,18 +52,15 @@ def __init__(
                 input_size = n_profiles
             else:
                 input_size = layer_sizes[i-1]
-            self.layers.append(
-                weight_norm(
-                    nn.Conv1d(
-                        input_size,
-                        layer_size,
-                        kernel_size)))
+            self.layers.append(weight_norm(
+                nn.Conv1d(input_size, layer_size, kernel_size)))
             self.layers.append(nn.ReLU())
             self.conv_output_size = calculate_conv_output_size(
                 self.conv_output_size, 0, 1, 1, kernel_size)
         self.layers.append(nn.MaxPool1d(kernel_size=self.pooling_size))
         self.conv_output_size = calculate_conv_output_size(
-            self.conv_output_size, 0, 1, self.pooling_size, self.pooling_size)
+            self.conv_output_size, 0, 1, self.pooling_size,
+            self.pooling_size)
         self.layers.append(nn.Dropout2d(dropout))
         self.net = nn.Sequential(*self.layers)
         self.conv_output_size = self.conv_output_size*layer_sizes[-1]
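The calculate_conv_output_size calls above take (length, padding, dilation, stride, kernel_size). Presumably the helper implements the standard Conv1d/MaxPool1d output-length formula; a sketch under that assumption (the actual helper lives elsewhere in this file):

```python
def calculate_conv_output_size_sketch(input_size, padding, dilation,
                                      stride, kernel_size):
    # Standard PyTorch Conv1d/MaxPool1d length formula:
    # floor((L + 2*p - d*(k - 1) - 1) / s + 1)
    return (input_size + 2 * padding
            - dilation * (kernel_size - 1) - 1) // stride + 1


# Conv1d with kernel 3, stride 1 (the loop above): 64 -> 62
print(calculate_conv_output_size_sketch(64, 0, 1, 1, 3))  # 62
# MaxPool1d with kernel = stride = pooling_size = 2: 62 -> 31
print(calculate_conv_output_size_sketch(62, 0, 1, 2, 2))  # 31
```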
@@ -294,7 +245,9 @@ def forward(self, x):
 # for i in range(iters):
 # x_,y_,mask_ = data_gen()
 # # print(y)
-# x, y, mask = Variable(torch.from_numpy(x_).float()), Variable(torch.from_numpy(y_).float()),Variable(torch.from_numpy(mask_).byte())
+# x, y, mask = Variable(torch.from_numpy(x_).float()),
+# Variable(torch.from_numpy(y_).float()),Variable(torch.from_numpy(mask_)
+# . byte())
 # # print(y)
 # optimizer.zero_grad()
 # # output = model(x.unsqueeze(0)).squeeze(0)
@@ -312,7 +265,8 @@ def forward(self, x):
 # optimizer.step()
 # if i > 0 and i % log_step == 0:
 # cur_loss = total_loss / count
-# print("Epoch {:2d} | lr {:.5f} | loss {:.5f}".format(0,lr, cur_loss))
+# print("Epoch {:2d} | lr {:.5f} | loss {:.5f}".format(0,lr,
+# # cur_loss))
 # total_loss = 0.0
 # count = 0
@@ -351,8 +305,8 @@ def build_torch_model(conf):
 
     # lin = nn.Linear(input_size,intermediate_dim)
     n_scalars, n_profiles, profile_size = get_signal_dimensions(conf)
-    dim = n_scalars+n_profiles*profile_size
-    input_size = dim
+    # dim = n_scalars + n_profiles*profile_size
+    # input_size = dim
     output_size = 1
     # intermediate_dim = 15
 
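The now-commented dim lines record how the flattened per-time-step input width relates to the signal dimensions; for example, with hypothetical values:

```python
n_scalars, n_profiles, profile_size = 4, 2, 64  # hypothetical values
dim = n_scalars + n_profiles * profile_size     # the commented-out formula
print(dim)  # 132
```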
@@ -362,17 +316,9 @@ def build_torch_model(conf):
 
     num_channels_tcn = [10, 5, 3, 3]  # [3]*5
     kernel_size_temporal = 3  # 3
-    model = FTCN(
-        n_scalars,
-        n_profiles,
-        profile_size,
-        layer_sizes_spatial,
-        kernel_size_spatial,
-        linear_size,
-        output_size,
-        num_channels_tcn,
-        kernel_size_temporal,
-        dropout)
+    model = FTCN(n_scalars, n_profiles, profile_size, layer_sizes_spatial,
+                 kernel_size_spatial, linear_size, output_size,
+                 num_channels_tcn, kernel_size_temporal, dropout)
 
     return model
 
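A build-and-forward smoke test might look like the following. This assumes the plasma package is importable, that conf describes the signals, and that the input width (14 here) matches n_scalars + n_profiles*profile_size — all assumptions, not code from this commit:

```python
import torch
from torch.autograd import Variable

from plasma.conf import conf
from plasma.models.torch_runner import build_torch_model

model = build_torch_model(conf)
# (batch, time, features); the feature width must match the conf's signals
x = Variable(torch.randn(2, 100, 14))
y = model(x)
print(y.size())
```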
@@ -392,17 +338,19 @@ def get_signal_dimensions(conf):
             n_profiles += 1
             is_1D_region = True
         else:
-            assert(
-                not is_1D_region), "make sure all use_signals are ordered such that 1D signals come last!"
-            assert(num_channels == 1)
+            assert not is_1D_region, (
+                "make sure all use_signals are ordered such that ",
+                "1D signals come last!")
+            assert num_channels == 1
             n_scalars += 1
             is_1D_region = False
     return n_scalars, n_profiles, profile_size
 
 
 def apply_model_to_np(model, x):
     # return
-    # model(Variable(torch.from_numpy(x).float()).unsqueeze(0)).squeeze(0).data.numpy()
+    # model(Variable(torch.from_numpy(x).float()).unsqueeze(0)).squeeze(
+    # 0).data.numpy()
     return model(Variable(torch.from_numpy(x).float())).data.numpy()
 
 
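One subtlety in the rewritten assert: the comma between the two string literals makes the message a tuple, so a failure would display a tuple rather than one sentence; dropping the comma lets adjacent literals concatenate into a single string. A quick demonstration:

```python
# The comma makes this a 2-tuple; an AssertionError would display a tuple:
msg_tuple = ("make sure all use_signals are ordered such that ",
             "1D signals come last!")
# Without the comma, adjacent literals concatenate into one string:
msg_concat = ("make sure all use_signals are ordered such that "
              "1D signals come last!")
print(type(msg_tuple).__name__, type(msg_concat).__name__)  # tuple str
```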
@@ -415,7 +363,7 @@ def make_predictions(conf, shot_list, loader, custom_path=None):
     else:
         model_path = custom_path
     inference_model.load_state_dict(torch.load(model_path))
-    #shot_list = shot_list.random_sublist(10)
+    # shot_list = shot_list.random_sublist(10)
 
     y_prime = []
     y_gold = []
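load_state_dict(torch.load(model_path)) implies the training side saved a state dict rather than a whole pickled module; a minimal save/restore round trip under that assumption:

```python
import torch
import torch.nn as nn

model = nn.Linear(4, 1)
torch.save(model.state_dict(), 'torch_model.pt')  # presumed save side

inference_model = nn.Linear(4, 1)                 # same architecture
inference_model.load_state_dict(torch.load('torch_model.pt'))
inference_model.eval()                            # inference mode
```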
@@ -425,7 +373,9 @@ def make_predictions(conf, shot_list, loader, custom_path=None):
     pbar = Progbar(num_shots)
     while True:
         x, y, mask, disr, lengths, num_so_far, num_total = next(generator)
-        #x, y, mask = Variable(torch.from_numpy(x_).float()), Variable(torch.from_numpy(y_).float()),Variable(torch.from_numpy(mask_).byte())
+        # x, y, mask = Variable(torch.from_numpy(x_).float()),
+        # Variable(torch.from_numpy(y_).float()),
+        # Variable(torch.from_numpy(mask_).byte())
         output = apply_model_to_np(inference_model, x)
         for batch_idx in range(x.shape[0]):
             curr_length = lengths[batch_idx]
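Each padded batch row is then cut back to its true length (the curr_length slice visible above); a self-contained sketch of that trimming with hypothetical shapes:

```python
import numpy as np

# Hypothetical padded batch: 3 shots, padded to 6 time steps, 1 output channel
output = np.random.rand(3, 6, 1)
lengths = [6, 4, 2]

y_prime = []
for batch_idx in range(output.shape[0]):
    curr_length = lengths[batch_idx]
    y_prime.append(output[batch_idx, :curr_length, 0])  # drop the padding

print([arr.shape for arr in y_prime])  # [(6,), (4,), (2,)]
```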
@@ -441,8 +391,8 @@ def make_predictions(conf, shot_list, loader, custom_path=None):
     return y_prime, y_gold, disruptive
 
 
-def make_predictions_and_evaluate_gpu(
-        conf, shot_list, loader, custom_path=None):
+def make_predictions_and_evaluate_gpu(conf, shot_list, loader,
+                                      custom_path=None):
     y_prime, y_gold, disruptive = make_predictions(
         conf, shot_list, loader, custom_path)
     analyzer = PerformanceAnalyzer(conf=conf)
@@ -452,8 +402,8 @@ def make_predictions_and_evaluate_gpu(
 
 
 def get_model_path(conf):
-    return conf['paths']['model_save_path'] + 'torch/' + \
-        model_filename  # save_prepath + model_filename
+    return (conf['paths']['model_save_path'] + 'torch/'
+            + model_filename)  # save_prepath + model_filename
 
 
 def train_epoch(model, data_gen, optimizer, loss_fn):
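The parenthesized continuation replaces the backslash, which is what PEP 8 recommends. os.path.join would build the same path without manual separators — a sketch, not the repository's code:

```python
import os


def get_model_path_sketch(conf, model_filename='torch_model.pt'):
    # Equivalent path construction using os.path.join;
    # the original appends 'torch/' with an explicit separator.
    return os.path.join(conf['paths']['model_save_path'], 'torch',
                        model_filename)


print(get_model_path_sketch({'paths': {'model_save_path': '/tmp/models/'}}))
# /tmp/models/torch/torch_model.pt
```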
@@ -485,8 +435,9 @@ def train_epoch(model, data_gen, optimizer, loss_fn):
         loss.backward()
         optimizer.step()
         step += 1
-        print("[{}] [{}/{}] loss: {:.3f}, ave_loss: {:.3f}".format(step,
-            num_so_far-num_so_far_start, num_total, loss.data[0], total_loss/step))
+        print("[{}] [{}/{}] loss: {:.3f}, ave_loss: {:.3f}".format(
+            step, num_so_far - num_so_far_start, num_total, loss.data[0],
+            total_loss/step))
         if num_so_far-num_so_far_start >= num_total:
             break
         x_, y_, mask_, num_so_far, num_total = next(data_gen)
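Note that loss.data[0] is pre-0.4 PyTorch; on 0.4 and later, indexing a zero-dimensional tensor raises an error and loss.item() is the replacement:

```python
import torch

loss = torch.tensor(0.25)  # zero-dimensional, like an MSELoss result
print(loss.item())         # 0.25 -- works on PyTorch >= 0.4
# loss.data[0]             # IndexError on modern PyTorch; use .item()
```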
@@ -497,19 +448,16 @@ def train(conf, shot_list_train, shot_list_validate, loader):
 
     np.random.seed(1)
 
-    #data_gen = ProcessGenerator(partial(loader.training_batch_generator_full_shot_partial_reset,shot_list=shot_list_train)())
+    # data_gen = ProcessGenerator(partial(
+    # loader.training_batch_generator_full_shot_partial_reset,shot_list
+    # = shot_list_train)())
     data_gen = partial(
        loader.training_batch_generator_full_shot_partial_reset,
        shot_list=shot_list_train)()
-
-    print(
-        'validate: {} shots, {} disruptive'.format(
-            len(shot_list_validate),
-            shot_list_validate.num_disruptive()))
-    print(
-        'training: {} shots, {} disruptive'.format(
-            len(shot_list_train),
-            shot_list_train.num_disruptive()))
+    print('validate: {} shots, {} disruptive'.format(
+        len(shot_list_validate), shot_list_validate.num_disruptive()))
+    print('training: {} shots, {} disruptive'.format(
+        len(shot_list_train), shot_list_train.num_disruptive()))
 
     loader.set_inference_mode(False)
 
@@ -521,16 +469,18 @@ def train(conf, shot_list_train, shot_list_validate, loader):
     num_epochs = conf['training']['num_epochs']
     patience = conf['callbacks']['patience']
     lr_decay = conf['model']['lr_decay']
-    batch_size = conf['training']['batch_size']
+    # batch_size = conf['training']['batch_size']
     lr = conf['model']['lr']
-    clipnorm = conf['model']['clipnorm']
+    # clipnorm = conf['model']['clipnorm']
     e = 0
     # warmup_steps = conf['model']['warmup_steps']
     # num_batches_minimum = conf['training']['num_batches_minimum']
 
     # if 'adam' in conf['model']['optimizer']:
     # optimizer = MPIAdam(lr=lr)
-    # elif conf['model']['optimizer'] == 'sgd' or conf['model']['optimizer'] == 'tf_sgd':
+    # elif conf['model']['optimizer'] == 'sgd' or conf['model']['optimizer'] ==
+    # 'tf_sgd':
+    #
     # optimizer = MPISGD(lr=lr)
     # elif 'momentum_sgd' in conf['model']['optimizer']:
     # optimizer = MPIMomentumSGD(lr=lr)
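The commented-out branches refer to MPI optimizer wrappers from the Keras/TensorFlow backend. A plain-PyTorch analogue of the same conf-driven selection — a hypothetical sketch, not this file's logic:

```python
import torch.optim as opt


def make_optimizer_sketch(model, conf):
    # Hypothetical mapping from conf strings to torch.optim classes,
    # mirroring the commented-out MPIAdam/MPISGD branches above.
    name = conf['model']['optimizer']
    lr = conf['model']['lr']
    if 'adam' in name:
        return opt.Adam(model.parameters(), lr=lr)
    if name in ('sgd', 'tf_sgd'):
        return opt.SGD(model.parameters(), lr=lr)
    if 'momentum_sgd' in name:
        return opt.SGD(model.parameters(), lr=lr, momentum=0.9)
    raise ValueError('unknown optimizer: {}'.format(name))
```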
@@ -550,16 +500,17 @@ def train(conf, shot_list_train, shot_list_validate, loader):
     scheduler = opt.lr_scheduler.ExponentialLR(optimizer, lr_decay)
     train_model.train()
     not_updated = 0
-    total_loss = 0
-    count = 0
+    # total_loss = 0
+    # count = 0
     loss_fn = nn.MSELoss(size_average=True)
     model_path = get_model_path(conf)
     makedirs_process_safe(os.path.dirname(model_path))
-    while e < num_epochs-1:
+    while e < num_epochs - 1:
         scheduler.step()
         print('\nEpoch {}/{}'.format(e, num_epochs))
-        (step, ave_loss, curr_loss, num_so_far, effective_epochs) = train_epoch(
-            train_model, data_gen, optimizer, loss_fn)
+        (step, ave_loss, curr_loss, num_so_far,
+         effective_epochs) = train_epoch(train_model, data_gen, optimizer,
+                                         loss_fn)
         e = effective_epochs
         loader.verbose = False  # True during the first iteration
         # if task_index == 0:
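Here scheduler.step() runs at the top of each epoch, which was the convention for older PyTorch; since PyTorch 1.1 the expected order is optimizer.step() before scheduler.step(), otherwise the first decay is skipped with a warning. A sketch of the modern ordering with the same ExponentialLR:

```python
import torch
import torch.nn as nn
import torch.optim as opt

model = nn.Linear(4, 1)
optimizer = opt.SGD(model.parameters(), lr=0.1)
scheduler = opt.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)

for epoch in range(3):
    out = model(torch.randn(8, 4))
    loss = out.pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()   # optimizer first ...
    scheduler.step()   # ... then the per-epoch lr decay
    print(epoch, optimizer.param_groups[0]['lr'])
```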
@@ -570,13 +521,14 @@ def train(conf, shot_list_train, shot_list_validate, loader):
 
         best_so_far = cmp_fn(roc_area, best_so_far)
 
-        stop_training = False
+        # stop_training = False
         print('=========Summary======== for epoch{}'.format(step))
         print('Training Loss numpy: {:.3e}'.format(ave_loss))
         print('Validation Loss: {:.3e}'.format(loss))
         print('Validation ROC: {:.4f}'.format(roc_area))
 
-        if best_so_far != roc_area:  # only save model weights if quantity we are tracking is improving
+        # only save model weights if the quantity we are tracking is improving
+        if best_so_far != roc_area:
             print("No improvement, still saving model")
             not_updated += 1
         else:
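The not_updated counter drives early stopping against conf['callbacks']['patience']; the pattern in isolation, as a runnable sketch:

```python
def early_stopping_sketch(roc_by_epoch, patience):
    # Stop once the tracked metric fails to improve for `patience` epochs,
    # mirroring the not_updated counter in train() above.
    best_so_far, not_updated = float('-inf'), 0
    for epoch, roc_area in enumerate(roc_by_epoch):
        if roc_area > best_so_far:
            best_so_far, not_updated = roc_area, 0
        else:
            not_updated += 1
        if not_updated >= patience:
            return epoch  # stop training here
    return len(roc_by_epoch) - 1


print(early_stopping_sketch([0.70, 0.74, 0.73, 0.73, 0.72], patience=2))  # 3
```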
