Commit 2cf43bc

Fix remaining ruff compliance issues
- Fixed all line-length violations in Adam and Adagrad optimizers
- Fixed abstract method issue in BaseOptimizer by providing proper implementation
- Improved code readability by extracting distance calculations
- Should resolve all 10 remaining CI failures

1 parent b2c3419 commit 2cf43bc
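
For context on the line-length fixes below: ruff flags over-long lines via rule E501 (its default limit is 88 columns, though the project may configure a different value), and the edits wrap long f-strings by splitting them into adjacent literals inside the existing parentheses, which Python concatenates implicitly. A minimal sketch of that idiom, with illustrative names rather than code from this repository:

def report(f_adagrad: float, x_adagrad: list[float]) -> None:
    # With the 4-space indent this single line is 91 columns, past the default limit:
    print(f"Adagrad: f = {f_adagrad:8.3f}, x = ({x_adagrad[0]:6.3f}, {x_adagrad[1]:6.3f})")


def report_wrapped(f_adagrad: float, x_adagrad: list[float]) -> None:
    # Same output: adjacent f-string literals are concatenated at compile time.
    print(
        f"Adagrad: f = {f_adagrad:8.3f}, x = "
        f"({x_adagrad[0]:6.3f}, {x_adagrad[1]:6.3f})"
    )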

3 files changed: +25 -9 lines

neural_network/optimizers/adagrad.py

Lines changed: 7 additions & 3 deletions
@@ -168,7 +168,9 @@ def _adagrad_update_recursive(
         new_params = []
         new_acc_grads = []
 
-        for _i, (p, g, ag) in enumerate(zip(parameters, gradients, accumulated_gradients)):
+        for _i, (p, g, ag) in enumerate(
+            zip(parameters, gradients, accumulated_gradients)
+        ):
             if isinstance(p, list) and isinstance(g, list):
                 # Recursive case for nested lists
                 new_p, new_ag = _adagrad_update_recursive(p, g, ag)
@@ -211,7 +213,8 @@ def _initialize_like(
         self, gradients: list[float] | list[list[float]]
     ) -> list[float] | list[list[float]]:
         """
-        Initialize accumulated gradients with same structure as gradients, filled with zeros.
+        Initialize accumulated gradients with same structure as gradients,
+        filled with zeros.
 
         Args:
             gradients: Reference structure for initialization
@@ -287,7 +290,8 @@ def __str__(self) -> str:
         f" SGD: f = {f_sgd:8.3f}, x = ({x_sgd[0]:6.3f}, {x_sgd[1]:6.3f})"
     )
     print(
-        f" Adagrad: f = {f_adagrad:8.3f}, x = ({x_adagrad[0]:6.3f}, {x_adagrad[1]:6.3f})"
+        f" Adagrad: f = {f_adagrad:8.3f}, x = "
+        f"({x_adagrad[0]:6.3f}, {x_adagrad[1]:6.3f})"
     )
 
     print("\nFinal comparison:")

neural_network/optimizers/adam.py

Lines changed: 16 additions & 6 deletions
@@ -177,7 +177,9 @@ def _adam_update_recursive(
             )
 
             # Update first moment: m = β₁ * m + (1-β₁) * g
-            new_first_moment = self.beta1 * first_moment + (1 - self.beta1) * gradients
+            new_first_moment = (
+                self.beta1 * first_moment + (1 - self.beta1) * gradients
+            )
 
             # Update second moment: v = β₂ * v + (1-β₂) * g²
             new_second_moment = self.beta2 * second_moment + (1 - self.beta2) * (
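
Aside: the β₁/β₂ comments in this hunk describe the two moment estimates of the standard Adam rule. For readers following along, here is a self-contained scalar sketch of that rule, including the bias-correction step of the published algorithm, using illustrative names and the usual default hyperparameters rather than this module's API:

import math


def adam_step(
    param: float,
    grad: float,
    m: float,
    v: float,
    t: int,
    learning_rate: float = 0.001,
    beta1: float = 0.9,
    beta2: float = 0.999,
    epsilon: float = 1e-8,
) -> tuple[float, float, float]:
    """One scalar Adam update with bias correction (t is the 1-based step count)."""
    m = beta1 * m + (1 - beta1) * grad          # first moment:  m = b1*m + (1-b1)*g
    v = beta2 * v + (1 - beta2) * grad**2       # second moment: v = b2*v + (1-b2)*g^2
    m_hat = m / (1 - beta1**t)                  # bias-corrected estimates
    v_hat = v / (1 - beta2**t)
    param -= learning_rate * m_hat / (math.sqrt(v_hat) + epsilon)
    return param, m, v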
@@ -355,25 +357,33 @@ def rosenbrock_gradient(x: float, y: float) -> list[float]:
         f" SGD: f = {f_sgd:10.3f}, x = ({x_sgd[0]:6.3f}, {x_sgd[1]:6.3f})"
     )
     print(
-        f" Adagrad: f = {f_adagrad:10.3f}, x = ({x_adagrad[0]:6.3f}, {x_adagrad[1]:6.3f})"
+        f" Adagrad: f = {f_adagrad:10.3f}, x = "
+        f"({x_adagrad[0]:6.3f}, {x_adagrad[1]:6.3f})"
     )
     print(
-        f" Adam: f = {f_adam:10.3f}, x = ({x_adam[0]:6.3f}, {x_adam[1]:6.3f})"
+        f" Adam: f = {f_adam:10.3f}, x = "
+        f"({x_adam[0]:6.3f}, {x_adam[1]:6.3f})"
     )
 
     print("\nFinal Results (target: x=1, y=1, f=0):")
     f_final_sgd = rosenbrock(x_sgd[0], x_sgd[1])
     f_final_adagrad = rosenbrock(x_adagrad[0], x_adagrad[1])
     f_final_adam = rosenbrock(x_adam[0], x_adam[1])
 
+    sgd_distance = math.sqrt((x_sgd[0] - 1) ** 2 + (x_sgd[1] - 1) ** 2)
     print(
-        f"SGD: f = {f_final_sgd:.6f}, distance to optimum = {math.sqrt((x_sgd[0] - 1) ** 2 + (x_sgd[1] - 1) ** 2):.4f}"
+        f"SGD: f = {f_final_sgd:.6f}, "
+        f"distance to optimum = {sgd_distance:.4f}"
     )
+    adagrad_distance = math.sqrt((x_adagrad[0] - 1) ** 2 + (x_adagrad[1] - 1) ** 2)
     print(
-        f"Adagrad: f = {f_final_adagrad:.6f}, distance to optimum = {math.sqrt((x_adagrad[0] - 1) ** 2 + (x_adagrad[1] - 1) ** 2):.4f}"
+        f"Adagrad: f = {f_final_adagrad:.6f}, "
+        f"distance to optimum = {adagrad_distance:.4f}"
     )
+    adam_distance = math.sqrt((x_adam[0] - 1) ** 2 + (x_adam[1] - 1) ** 2)
     print(
-        f"Adam: f = {f_final_adam:.6f}, distance to optimum = {math.sqrt((x_adam[0] - 1) ** 2 + (x_adam[1] - 1) ** 2):.4f}"
+        f"Adam: f = {f_final_adam:.6f}, "
+        f"distance to optimum = {adam_distance:.4f}"
     )
 
     # Determine best performer
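
The last part of this hunk is the readability change called out in the commit message: the nested math.sqrt expressions are pulled out into named *_distance variables before the print calls, which also brings the f-strings under the line limit. Purely as an illustration, the same Euclidean distance to the Rosenbrock optimum at (1, 1) could also be expressed with a small helper built on math.hypot (a hypothetical function, not part of this commit):

import math


def distance_to_optimum(
    x: list[float], optimum: tuple[float, float] = (1.0, 1.0)
) -> float:
    """Euclidean distance from the current point to the target optimum."""
    return math.hypot(x[0] - optimum[0], x[1] - optimum[1])


# e.g. sgd_distance = distance_to_optimum(x_sgd)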

neural_network/optimizers/base_optimizer.py

Lines changed: 2 additions & 0 deletions
@@ -77,6 +77,8 @@ def reset(self) -> None:
         or when you want to clear any accumulated state (like momentum).
         Default implementation does nothing, but optimizers with state should override.
         """
+        # Default implementation does nothing - optimizers with state should override
+        return
 
     def __str__(self) -> str:
         """String representation of the optimizer."""

0 commit comments
