
Commit 98cbac7

Start using saner line breaks around fn defs and calls
Reduce the # of lines by passing multiple arguments per line, rather than one per line as autopep8 does
1 parent e14a678 commit 98cbac7
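
To illustrate the style change, using load_shots from plasma/models/shallow_runner.py as an example (the full diff appears below): autopep8's aggressive wrapping puts one argument per line at a fixed hanging indent, while the new style packs several arguments per line and aligns continuations with the opening parenthesis, still under 79 columns:

    # before: autopep8's one-argument-per-line wrapping
    def load_shots(
            self,
            shot_list,
            is_inference=False,
            as_list=False,
            num_samples=np.Inf):
        ...

    # after: multiple arguments per line, aligned with the open paren
    def load_shots(self, shot_list, is_inference=False, as_list=False,
                   num_samples=np.Inf):
        ...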

6 files changed (+153 −375 lines)

plasma/models/loader.py

Lines changed: 8 additions & 21 deletions
@@ -107,13 +107,8 @@ def training_batch_generator(self, shot_list):
                 num_so_far, num_total
         epoch += 1

-    def fill_training_buffer(
-            self,
-            Xbuff,
-            Ybuff,
-            end_indices,
-            shot,
-            is_first_fill=False):
+    def fill_training_buffer(self, Xbuff, Ybuff, end_indices, shot,
+                             is_first_fill=False):
         sig, res = self.get_signal_result_from_shot(shot)
         length = self.conf['model']['length']
         if is_first_fill:  # cut signal to random position
@@ -388,20 +383,16 @@ def fill_batch_queue(self, shot_list, queue):
 
     def training_batch_generator_process(self, shot_list):
         queue = mp.Queue()
-        proc = mp.Process(
-            target=self.fill_batch_queue, args=(
-                shot_list, queue))
+        proc = mp.Process(target=self.fill_batch_queue,
+                          args=(shot_list, queue))
         proc.start()
         while True:
             yield queue.get(True)
         proc.join()
         queue.close()

-    def load_as_X_y_list(
-            self,
-            shot_list,
-            verbose=False,
-            prediction_mode=False):
+    def load_as_X_y_list(self, shot_list, verbose=False,
+                         prediction_mode=False):
         """
         The method turns a ShotList into a set of equal-sized patches which
         contain a number of examples that is a multiple of the batch size.
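
Aside: the training_batch_generator_process reformatted above follows a common producer-process pattern, where a child process fills an mp.Queue and the parent yields from it. A minimal self-contained sketch of the same idea (hypothetical names, not repo code):

    import multiprocessing as mp

    def produce(queue):
        # child process fills the queue with "batches" (here, integers)
        for i in range(4):
            queue.put(i)

    def batch_generator():
        queue = mp.Queue()
        proc = mp.Process(target=produce, args=(queue,))
        proc.start()
        for _ in range(4):
            # block until the producer has put the next batch
            yield queue.get(True)
        proc.join()
        queue.close()

    if __name__ == '__main__':
        print(list(batch_generator()))  # [0, 1, 2, 3]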
@@ -696,12 +687,8 @@ def arange_patches(self, sig_patches, res_patches):
             y_list.append(y)
         return X_list, y_list

-    def arange_patches_single(
-            self,
-            sig_patches,
-            res_patches,
-            prediction_mode=False,
-            custom_batch_size=None):
+    def arange_patches_single(self, sig_patches, res_patches,
+                              prediction_mode=False, custom_batch_size=None):
         if prediction_mode:
             num_timesteps = self.conf['model']['pred_length']
             batch_size = self.conf['model']['pred_batch_size']

plasma/models/mpi_runner.py

Lines changed: 13 additions & 26 deletions
@@ -54,8 +54,8 @@
 
 if backend == 'tf' or backend == 'tensorflow':
     if NUM_GPUS > 1:
-        os.environ['CUDA_VISIBLE_DEVICES'] = '{}'.format(
-            MY_GPU)  # ,mode=NanGuardMode'
+        os.environ['CUDA_VISIBLE_DEVICES'] = '{}'.format(MY_GPU)
+        # ,mode=NanGuardMode'
     os.environ['KERAS_BACKEND'] = 'tensorflow'
     import tensorflow as tf
     from keras.backend.tensorflow_backend import set_session
@@ -749,8 +749,8 @@ def mpi_make_predictions(conf, shot_list, loader, custom_path=None):
     return y_prime_global, y_gold_global, disruptive_global


-def mpi_make_predictions_and_evaluate(
-        conf, shot_list, loader, custom_path=None):
+def mpi_make_predictions_and_evaluate(conf, shot_list, loader,
+                                      custom_path=None):
     y_prime, y_gold, disruptive = mpi_make_predictions(
         conf, shot_list, loader, custom_path)
     analyzer = PerformanceAnalyzer(conf=conf)
@@ -823,17 +823,9 @@ def mpi_train(conf, shot_list_train, shot_list_validate, loader,
                                 shot_list=shot_list_train)

     print("warmup {}".format(warmup_steps))
-    mpi_model = MPIModel(
-        train_model,
-        optimizer,
-        comm,
-        batch_generator,
-        batch_size,
-        lr=lr,
-        warmup_steps=warmup_steps,
-        num_batches_minimum=num_batches_minimum,
-        conf=conf
-    )
+    mpi_model = MPIModel(train_model, optimizer, comm, batch_generator,
+                         batch_size, lr=lr, warmup_steps=warmup_steps,
+                         num_batches_minimum=num_batches_minimum, conf=conf)
     mpi_model.compile(
         conf['model']['optimizer'],
         clipnorm,
@@ -940,13 +932,11 @@ def mpi_train(conf, shot_list_train, shot_list_validate, loader,
 
         # tensorboard
         if backend != 'theano':
-            val_generator = partial(
-                loader.training_batch_generator,
-                shot_list=shot_list_validate)()
+            val_generator = partial(loader.training_batch_generator,
+                                    shot_list=shot_list_validate)()
             val_steps = 1
-            tensorboard.on_epoch_end(
-                val_generator, val_steps, int(
-                    round(e)), epoch_logs)
+            tensorboard.on_epoch_end(val_generator, val_steps,
+                                     int(round(e)), epoch_logs)

         print_unique("end epoch {} 0".format(e))
         stop_training = comm.bcast(stop_training, root=0)
@@ -972,11 +962,8 @@ def get_stop_training(callbacks):


 class TensorBoard(object):
-    def __init__(self, log_dir='./logs',
-                 histogram_freq=0,
-                 validation_steps=0,
-                 write_graph=True,
-                 write_grads=False):
+    def __init__(self, log_dir='./logs', histogram_freq=0, validation_steps=0,
+                 write_graph=True, write_grads=False):
         if K.backend() != 'tensorflow':
             raise RuntimeError('TensorBoard callback only works '
                                'with the TensorFlow backend.')

plasma/models/shallow_runner.py

Lines changed: 7 additions & 18 deletions
@@ -71,12 +71,8 @@ def get_sample_probs(self, shot_list, num_samples):
             return val, val
         return sample_prob_d, sample_prob_nd

-    def load_shots(
-            self,
-            shot_list,
-            is_inference=False,
-            as_list=False,
-            num_samples=np.Inf):
+    def load_shots(self, shot_list, is_inference=False, as_list=False,
+                   num_samples=np.Inf):
         X = []
         Y = []
         Disr = []
@@ -153,7 +149,6 @@ def process(self, shot):
         return X, Y, disr

     def get_X(self, shot):
-
         use_signals = self.loader.conf['paths']['use_signals']
         sig_sample = shot.signals_dict[use_signals[0]]
         if len(shot.ttd.shape) == 1:
@@ -181,13 +176,8 @@ def get_Y(self, shot):
         offset = self.timesteps - 1
         return np.round(shot.ttd[offset:, 0]).astype(np.int)

-    def load_shot(
-            self,
-            shot,
-            is_inference=False,
-            sample_prob_d=1.0,
-            sample_prob_nd=1.0):
-
+    def load_shot(self, shot, is_inference=False, sample_prob_d=1.0,
+                  sample_prob_nd=1.0):
         X, Y, disr = self.process(shot)

         # cut shot ends if we are supposed to
@@ -200,10 +190,9 @@ def load_shot(
         if disr:
             sample_prob = sample_prob_d
         if sample_prob < 1.0:
-            indices = np.sort(
-                np.random.choice(np.array(range(len(Y))),
-                                 int(round(sample_prob*len(Y))),
-                                 replace=False))
+            indices = np.sort(np.random.choice(np.array(range(len(Y))),
+                                               int(round(sample_prob*len(Y))),
+                                               replace=False))
             X = X[indices]
             Y = Y[indices]
         return X, Y, disr
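
Aside: the reflowed np.random.choice call above keeps a sorted random fraction sample_prob of the timesteps, drawn without replacement. A quick standalone illustration with made-up sizes (not repo code):

    import numpy as np

    Y = np.arange(10)          # stand-in for per-timestep labels
    sample_prob = 0.3          # keep ~30% of the timesteps
    indices = np.sort(np.random.choice(np.array(range(len(Y))),
                                       int(round(sample_prob*len(Y))),
                                       replace=False))
    print(indices)             # e.g. [1 4 8]: 3 sorted indices, no repeats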

plasma/models/torch_runner.py

Lines changed: 24 additions & 64 deletions
@@ -66,10 +66,8 @@ def __init__(self, n_scalars, n_profiles, profile_size, layer_sizes,
         self.conv_output_size = self.conv_output_size*layer_sizes[-1]
         self.linear_layers = []

-        print(
-            "Final feature size = {}".format(
-                self.n_scalars
-                + self.conv_output_size))
+        print("Final feature size = {}".format(self.n_scalars
+                                               + self.conv_output_size))
         self.linear_layers.append(
             nn.Linear(
                 self.conv_output_size
@@ -111,8 +109,8 @@ def forward(self, x):


 def calculate_conv_output_size(L_in, padding, dilation, stride, kernel_size):
-    return int(np.floor((L_in + 2*padding - dilation
-                         * (kernel_size-1) - 1)*1.0/stride + 1))
+    return int(np.floor(
+        (L_in + 2*padding - dilation * (kernel_size-1) - 1)*1.0/stride + 1))


 class Chomp1d(nn.Module):
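
Aside: the expression being reflowed here is the standard convolution output-length relation, L_out = floor((L_in + 2*padding - dilation*(kernel_size-1) - 1)/stride + 1), as documented for torch.nn.Conv1d. A quick sanity check with made-up dimensions:

    import numpy as np

    def calculate_conv_output_size(L_in, padding, dilation, stride,
                                   kernel_size):
        # floor((L_in + 2p - d*(k-1) - 1)/s + 1), per the Conv1d docs
        return int(np.floor(
            (L_in + 2*padding - dilation * (kernel_size-1) - 1)*1.0/stride + 1))

    # a length-128 signal, padding 1, dilation 1, stride 2, kernel 3:
    # floor((128 + 2 - 2 - 1)/2 + 1) = floor(64.5) = 64
    assert calculate_conv_output_size(128, 1, 1, 2, 3) == 64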
@@ -125,51 +123,28 @@ def forward(self, x):


 class TemporalBlock(nn.Module):
-    def __init__(
-            self,
-            n_inputs,
-            n_outputs,
-            kernel_size,
-            stride,
-            dilation,
-            padding,
-            dropout=0.2):
+    def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation,
+                 padding, dropout=0.2):
         super(TemporalBlock, self).__init__()
-        self.conv1 = weight_norm(
-            nn.Conv1d(
-                n_inputs,
-                n_outputs,
-                kernel_size,
-                stride=stride,
-                padding=padding,
-                dilation=dilation))
+        self.conv1 = weight_norm(nn.Conv1d(
+            n_inputs, n_outputs, kernel_size, stride=stride, padding=padding,
+            dilation=dilation))
         self.chomp1 = Chomp1d(padding)
         self.relu1 = nn.ReLU()
         self.dropout1 = nn.Dropout2d(dropout)

-        self.conv2 = weight_norm(
-            nn.Conv1d(
-                n_outputs,
-                n_outputs,
-                kernel_size,
-                stride=stride,
-                padding=padding,
-                dilation=dilation))
+        self.conv2 = weight_norm(nn.Conv1d(
+            n_outputs, n_outputs, kernel_size, stride=stride, padding=padding,
+            dilation=dilation))
         self.chomp2 = Chomp1d(padding)
         self.relu2 = nn.ReLU()
         self.dropout2 = nn.Dropout2d(dropout)

-        self.net = nn.Sequential(
-            self.conv1,
-            self.chomp1,
-            self.relu1,
-            self.dropout1,
-            self.conv2,
-            self.chomp2,
-            self.relu2,
-            self.dropout2)
-        self.downsample = nn.Conv1d(
-            n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
+        self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1,
+                                 self.dropout1, self.conv2, self.chomp2,
+                                 self.relu2, self.dropout2)
+        self.downsample = (nn.Conv1d(n_inputs, n_outputs, 1)
+                           if n_inputs != n_outputs else None)
         self.relu = nn.ReLU()
         self.init_weights()

@@ -196,11 +171,8 @@ def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
             dilation_size = 2 ** i
             in_channels = num_inputs if i == 0 else num_channels[i-1]
             out_channels = num_channels[i]
-            layers += [TemporalBlock(in_channels,
-                                     out_channels,
-                                     kernel_size,
-                                     stride=1,
-                                     dilation=dilation_size,
+            layers += [TemporalBlock(in_channels, out_channels, kernel_size,
+                                     stride=1, dilation=dilation_size,
                                      padding=(kernel_size-1) * dilation_size,
                                      dropout=dropout)]

@@ -211,19 +183,11 @@ def forward(self, x):


 class TCN(nn.Module):
-    def __init__(
-            self,
-            input_size,
-            output_size,
-            num_channels,
-            kernel_size,
-            dropout):
+    def __init__(self, input_size, output_size, num_channels, kernel_size,
+                 dropout):
         super(TCN, self).__init__()
-        self.tcn = TemporalConvNet(
-            input_size,
-            num_channels,
-            kernel_size,
-            dropout=dropout)
+        self.tcn = TemporalConvNet(input_size, num_channels, kernel_size,
+                                   dropout=dropout)
         self.linear = nn.Linear(num_channels[-1], output_size)
         # self.sig = nn.Sigmoid()

@@ -278,7 +242,6 @@ def __init__(self, module, batch_first=False):
         self.batch_first = batch_first

     def forward(self, x):
-
         if len(x.size()) <= 2:
             return self.module(x)

@@ -301,8 +264,7 @@ def forward(self, x):

 def build_torch_model(conf):
     dropout = conf['model']['dropout_prob']
-    # dim = 10
-
+    # dim = 10
     # lin = nn.Linear(input_size,intermediate_dim)
     n_scalars, n_profiles, profile_size = get_signal_dimensions(conf)
     # dim = n_scalars + n_profiles*profile_size
445407

446408

447409
def train(conf, shot_list_train, shot_list_validate, loader):
448-
449410
np.random.seed(1)
450-
451411
# data_gen = ProcessGenerator(partial(
452412
# loader.training_batch_generator_full_shot_partial_reset,shot_list
453413
# = shot_list_train)())
