Skip to content

Commit 82ca209

Browse files
committed
Apply ruff formatting to neural network optimizers
- Applied consistent code formatting across all optimizer files
- This matches the formatting that pre-commit.ci attempted to apply
- Resolves ruff format failures in CI

Files formatted: adagrad.py, adam.py, momentum_sgd.py, nag.py, sgd.py
1 parent fa03d6e commit 82ca209

File tree

5 files changed

+18
-45
lines changed

5 files changed

+18
-45
lines changed

neural_network/optimizers/adagrad.py

Lines changed: 4 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -122,10 +122,9 @@ def update(
122122
def _adagrad_update_recursive(
123123
parameters: float | list[float | list[float]],
124124
gradients: float | list[float | list[float]],
125-
accumulated_gradients: float | list[float | list[float]]
125+
accumulated_gradients: float | list[float | list[float]],
126126
) -> tuple[
127-
float | list[float | list[float]],
128-
float | list[float | list[float]]
127+
float | list[float | list[float]], float | list[float | list[float]]
129128
]:
130129
# Handle scalar case
131130
if isinstance(parameters, (int, float)):
@@ -156,9 +155,7 @@ def _adagrad_update_recursive(
156155
f"Shape mismatch: parameters length {len(parameters)} vs "
157156
f"gradients length {len(gradients)}"
158157
)
159-
raise ValueError(
160-
msg
161-
)
158+
raise ValueError(msg)
162159

163160
if accumulated_gradients is None:
164161
accumulated_gradients = [None] * len(parameters)
@@ -192,9 +189,7 @@ def _adagrad_update_recursive(
192189
new_acc_grads.append(new_ag)
193190
else:
194191
msg = f"Shape mismatch: inconsistent types {type(p)} vs {type(g)}"
195-
raise ValueError(
196-
msg
197-
)
192+
raise ValueError(msg)
198193

199194
return new_params, new_acc_grads
200195

neural_network/optimizers/adam.py

Lines changed: 5 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -167,7 +167,7 @@ def _adam_update_recursive(
167167
parameters: float | list,
168168
gradients: float | list,
169169
first_moment: float | list,
170-
second_moment: float | list
170+
second_moment: float | list,
171171
) -> tuple[float | list, float | list, float | list]:
172172
# Handle scalar case
173173
if isinstance(parameters, (int, float)):
@@ -203,9 +203,7 @@ def _adam_update_recursive(
203203
f"Shape mismatch: parameters length {len(parameters)} vs "
204204
f"gradients length {len(gradients)}"
205205
)
206-
raise ValueError(
207-
msg
208-
)
206+
raise ValueError(msg)
209207

210208
new_params = []
211209
new_first_moments = []
@@ -239,9 +237,7 @@ def _adam_update_recursive(
239237
new_second_moments.append(new_m2)
240238
else:
241239
msg = f"Shape mismatch: inconsistent types {type(p)} vs {type(g)}"
242-
raise ValueError(
243-
msg
244-
)
240+
raise ValueError(msg)
245241

246242
return new_params, new_first_moments, new_second_moments
247243

@@ -371,20 +367,14 @@ def rosenbrock_gradient(x: float, y: float) -> list[float]:
371367
f_final_adam = rosenbrock(x_adam[0], x_adam[1])
372368

373369
sgd_distance = math.sqrt((x_sgd[0] - 1) ** 2 + (x_sgd[1] - 1) ** 2)
374-
print(
375-
f"SGD: f = {f_final_sgd:.6f}, "
376-
f"distance to optimum = {sgd_distance:.4f}"
377-
)
370+
print(f"SGD: f = {f_final_sgd:.6f}, distance to optimum = {sgd_distance:.4f}")
378371
adagrad_distance = math.sqrt((x_adagrad[0] - 1) ** 2 + (x_adagrad[1] - 1) ** 2)
379372
print(
380373
f"Adagrad: f = {f_final_adagrad:.6f}, "
381374
f"distance to optimum = {adagrad_distance:.4f}"
382375
)
383376
adam_distance = math.sqrt((x_adam[0] - 1) ** 2 + (x_adam[1] - 1) ** 2)
384-
print(
385-
f"Adam: f = {f_final_adam:.6f}, "
386-
f"distance to optimum = {adam_distance:.4f}"
387-
)
377+
print(f"Adam: f = {f_final_adam:.6f}, distance to optimum = {adam_distance:.4f}")
388378

389379
# Determine best performer
390380
best_loss = min(f_final_sgd, f_final_adagrad, f_final_adam)

neural_network/optimizers/momentum_sgd.py

Lines changed: 3 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -117,7 +117,7 @@ def update(
117117
def _check_shapes_and_get_velocity(
118118
parameters: float | list[float | list[float]],
119119
gradients: float | list[float | list[float]],
120-
velocity_values: float | list[float | list[float]]
120+
velocity_values: float | list[float | list[float]],
121121
) -> tuple[
122122
float | list[float | list[float]], float | list[float | list[float]]
123123
]:
@@ -146,9 +146,7 @@ def _check_shapes_and_get_velocity(
146146
f"Shape mismatch: parameters length {len(parameters)} vs "
147147
f"gradients length {len(gradients)}"
148148
)
149-
raise ValueError(
150-
msg
151-
)
149+
raise ValueError(msg)
152150

153151
if velocity_values is None:
154152
velocity_values = [None] * len(parameters)
@@ -176,9 +174,7 @@ def _check_shapes_and_get_velocity(
176174
new_velocity.append(new_v)
177175
else:
178176
msg = f"Shape mismatch: inconsistent types {type(p)} vs {type(g)}"
179-
raise ValueError(
180-
msg
181-
)
177+
raise ValueError(msg)
182178

183179
return new_params, new_velocity
184180

neural_network/optimizers/nag.py

Lines changed: 3 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -119,7 +119,7 @@ def update(
119119
def _nag_update_recursive(
120120
parameters: float | list,
121121
gradients: float | list,
122-
velocity: float | list | None
122+
velocity: float | list | None,
123123
) -> tuple[float | list, float | list]:
124124
# Handle scalar case
125125
if isinstance(parameters, (int, float)):
@@ -150,9 +150,7 @@ def _nag_update_recursive(
150150
f"Shape mismatch: parameters length {len(parameters)} vs "
151151
f"gradients length {len(gradients)}"
152152
)
153-
raise ValueError(
154-
msg
155-
)
153+
raise ValueError(msg)
156154

157155
if velocity is None:
158156
velocity = [None] * len(parameters)
@@ -184,9 +182,7 @@ def _nag_update_recursive(
184182
new_velocity.append(new_v)
185183
else:
186184
msg = f"Shape mismatch: inconsistent types {type(p)} vs {type(g)}"
187-
raise ValueError(
188-
msg
189-
)
185+
raise ValueError(msg)
190186

191187
return new_params, new_velocity
192188

neural_network/optimizers/sgd.py

Lines changed: 3 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -98,7 +98,7 @@ def update(
9898

9999
def _check_and_update_recursive(
100100
parameters: float | list[float | list[float]],
101-
gradients: float | list[float | list[float]]
101+
gradients: float | list[float | list[float]],
102102
) -> float | list[float | list[float]]:
103103
# Handle 1D case (list of floats)
104104
if isinstance(parameters, (int, float)):
@@ -114,9 +114,7 @@ def _check_and_update_recursive(
114114
f"Shape mismatch: parameters length {len(parameters)} vs "
115115
f"gradients length {len(gradients)}"
116116
)
117-
raise ValueError(
118-
msg
119-
)
117+
raise ValueError(msg)
120118

121119
result = []
122120
for p, g in zip(parameters, gradients):
@@ -128,9 +126,7 @@ def _check_and_update_recursive(
128126
result.append(p - self.learning_rate * g)
129127
else:
130128
msg = f"Shape mismatch: inconsistent types {type(p)} vs {type(g)}"
131-
raise ValueError(
132-
msg
133-
)
129+
raise ValueError(msg)
134130

135131
return result
136132

0 commit comments

Comments (0)