From 4401570c9670f24546709313ef6bac294fad2854 Mon Sep 17 00:00:00 2001
From: AdrienTaylor <10559960+AdrienTaylor@users.noreply.github.com>
Date: Fri, 7 Nov 2025 08:25:50 +0100
Subject: [PATCH] updated expensive Loja formulations + tests + whatsnew

---
 ...uadratic_lojasiewicz_function_expensive.py |  41 ++--
 docs/source/whatsnew.rst                      |   1 +
 docs/source/whatsnew/0.4.1.rst                |   6 +
 .../__init__.py                               |   4 +
 ...descent_quadratic_lojasiewicz_expensive.py | 175 ++++++++++++++++
 ...ic_lojasiewicz_function_super_expensive.py | 190 ++++++++++++++++++
 tests/test_uselessly_complexified_examples.py |  26 +++
 7 files changed, 426 insertions(+), 17 deletions(-)
 create mode 100644 docs/source/whatsnew/0.4.1.rst
 create mode 100644 tests/additional_complexified_examples_tests/gradient_descent_quadratic_lojasiewicz_expensive.py
 create mode 100644 tests/additional_complexified_examples_tests/smooth_quadratic_lojasiewicz_function_super_expensive.py

diff --git a/PEPit/functions/smooth_quadratic_lojasiewicz_function_expensive.py b/PEPit/functions/smooth_quadratic_lojasiewicz_function_expensive.py
index 43d4f09c..7b969d1f 100644
--- a/PEPit/functions/smooth_quadratic_lojasiewicz_function_expensive.py
+++ b/PEPit/functions/smooth_quadratic_lojasiewicz_function_expensive.py
@@ -162,24 +162,31 @@ def add_class_constraints(self):
                             xi - xj) ** 2
                     B = (self.L + self.mu) * (fi - fs - 1 / (2 * self.L) * gi ** 2)
                     C = (self.L - self.mu) * (fj - fs + 1 / (2 * self.L) * gj ** 2)
-
-                    D = 2 * self.mu * (B - C - (self.L + 3 * self.mu) * A) / (2 * self.L + self.mu)
-                    E = 4 * self.mu ** 2 * ((self.L + self.mu) * A - 2 * B) / (2 * self.L + self.mu) ** 2
-                    F = - 2 * self.mu * A - D - E - 8 * self.mu ** 3 * B / (2 * self.L + self.mu) ** 3
-
-                    M13 = Expression()
-                    M14 = Expression()
-                    M22 = - 6 * self.mu * A - D - 2 * M13
-                    M24 = Expression()
-                    M33 = - 6 * self.mu * A - 2 * D - E - 2 * M24
-
-                    T = np.array([[-2 * self.mu * A, 0, M13, M14],
-                                  [0, M22, -M14, M24],
-                                  [M13, -M14, M33, 0],
-                                  [M14, M24, 0, F]], dtype=Expression)
-
-                    psd_matrix = PSDMatrix(matrix_of_expressions=T,
+                    Mt11 = - A * (2 * self.L + self.mu)
+                    Mt12 = Expression()
+                    Mt22 = Expression()
+
+                    D = B - C - (self.L + 3 * self.mu) * A
+
+                    M11 = Mt11 - 4 * self.mu / (2 * self.L + self.mu) * Mt12 - D
+                    M12 = Mt12 - self.mu / (2 * self.L + self.mu) * Mt22 - (self.L + self.mu) / 2 * A + B
+                    M22 = Mt22 - B
+
+                    T1 = np.array([[M11, M12],
+                                   [M12, M22]], dtype=Expression)
+
+                    psd_matrix = PSDMatrix(matrix_of_expressions=T1,
+                                           name="IC_{}_{}({}, {})".format(function_id,
+                                                                          "smooth_quadratic_lojasiewicz_lmi_1",
+                                                                          xi_id, xj_id))
+                    self.list_of_class_psd.append(psd_matrix)
+
+                    T2 = np.array([[Mt11, Mt12],
+                                   [Mt12, Mt22]], dtype=Expression)
+
+                    psd_matrix = PSDMatrix(matrix_of_expressions=T2,
                                            name="IC_{}_{}({}, {})".format(function_id,
-                                                                          "smooth_quadratic_lojasiewicz_lmi",
+                                                                          "smooth_quadratic_lojasiewicz_lmi_2",
                                                                           xi_id, xj_id))
                     self.list_of_class_psd.append(psd_matrix)
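
A minimal sketch of how the reformulated class is driven end to end (assuming PEPit and an SDP solver are installed; the parameters follow the example used throughout this patch):

    from PEPit import PEP
    from PEPit.functions import SmoothQuadraticLojasiewiczFunctionExpensive

    # Worst-case value of one gradient step with gamma = 1/L on the reformulated
    # class; this mirrors the setup of the tests added further down in this patch.
    problem = PEP()
    func = problem.declare_function(SmoothQuadraticLojasiewiczFunctionExpensive, L=1., mu=.2)
    xs = func.stationary_point()  # stationary point x_*, with f(xs) = f_*
    fs = func(xs)
    x0 = problem.set_initial_point()
    problem.set_initial_condition(func(x0) - fs <= 1)
    x1 = x0 - 1. * func.gradient(x0)
    problem.set_performance_metric(func(x1) - fs)
    print(problem.solve(wrapper="cvxpy", solver=None, verbose=0))  # ~0.683267, cf. the example output below
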
diff --git a/docs/source/whatsnew.rst b/docs/source/whatsnew.rst
index 611824ae..a2707307 100644
--- a/docs/source/whatsnew.rst
+++ b/docs/source/whatsnew.rst
@@ -12,3 +12,4 @@ What's new in PEPit
    whatsnew/0.3.2
    whatsnew/0.3.3
    whatsnew/0.4.0
+   whatsnew/0.4.1
diff --git a/docs/source/whatsnew/0.4.1.rst b/docs/source/whatsnew/0.4.1.rst
new file mode 100644
index 00000000..aa173eab
--- /dev/null
+++ b/docs/source/whatsnew/0.4.1.rst
@@ -0,0 +1,6 @@
+What's new in PEPit 0.4.1
+=========================
+
+- Improvement: The :class:`SmoothQuadraticLojasiewiczFunctionExpensive` class now relies on slightly cheaper formulations.
+
+- Added uselessly complexified tests to verify numerically that the new and previous formulations agree.
diff --git a/tests/additional_complexified_examples_tests/__init__.py b/tests/additional_complexified_examples_tests/__init__.py
index b1b1a0eb..95ac955f 100644
--- a/tests/additional_complexified_examples_tests/__init__.py
+++ b/tests/additional_complexified_examples_tests/__init__.py
@@ -12,6 +12,8 @@
 from .proximal_point_LMI import wc_proximal_point_complexified3
 from .randomized_coordinate_descent_smooth_convex import wc_randomized_coordinate_descent_smooth_convex_complexified
 from .randomized_coordinate_descent_smooth_strongly_convex import wc_randomized_coordinate_descent_smooth_strongly_convex_complexified
+from .smooth_quadratic_lojasiewicz_function_super_expensive import SmoothQuadraticLojasiewiczFunctionComplexified
+from .gradient_descent_quadratic_lojasiewicz_expensive import wc_gradient_descent_quadratic_lojasiewicz_complexified
 
 
 __all__ = ['cyclic_coordinate_descent_refined', 'wc_cyclic_coordinate_descent_refined',
@@ -28,4 +30,6 @@
     'proximal_point_LMI', 'wc_proximal_point_complexified3',
     'randomized_coordinate_descent_smooth_convex', 'wc_randomized_coordinate_descent_smooth_convex_complexified',
     'randomized_coordinate_descent_smooth_strongly_convex', 'wc_randomized_coordinate_descent_smooth_strongly_convex_complexified',
+    'smooth_quadratic_lojasiewicz_function_super_expensive', 'SmoothQuadraticLojasiewiczFunctionComplexified',
+    'gradient_descent_quadratic_lojasiewicz_expensive', 'wc_gradient_descent_quadratic_lojasiewicz_complexified',
     ]
diff --git a/tests/additional_complexified_examples_tests/gradient_descent_quadratic_lojasiewicz_expensive.py b/tests/additional_complexified_examples_tests/gradient_descent_quadratic_lojasiewicz_expensive.py
new file mode 100644
index 00000000..9a5a4cec
--- /dev/null
+++ b/tests/additional_complexified_examples_tests/gradient_descent_quadratic_lojasiewicz_expensive.py
@@ -0,0 +1,175 @@
+from PEPit import PEP
+from tests.additional_complexified_examples_tests import SmoothQuadraticLojasiewiczFunctionComplexified
+import numpy as np
+
+
+def wc_gradient_descent_quadratic_lojasiewicz_complexified(L, mu, gamma, n, wrapper="cvxpy", solver=None, verbose=1):
+    """
+    Consider the minimization problem
+
+    .. math:: f_\\star \\triangleq \\min_x f(x),
+
+    where :math:`f` is :math:`L`-smooth and satisfies a quadratic Lojasiewicz inequality:
+
+    .. math:: f(x)-f_\\star \\leqslant \\frac{1}{2\\mu}\\|\\nabla f(x)\\|^2,
+
+    see [1, 2, 3] for details. The example here relies on the
+    :class:`SmoothQuadraticLojasiewiczFunctionComplexified` description of smooth Lojasiewicz functions
+    (based on [5, Proposition 3.4]).
+
+    This code computes a worst-case guarantee for **gradient descent** with fixed step-size :math:`\\gamma`.
+    That is, it computes the smallest possible :math:`\\tau(n, L, \\mu, \\gamma)` such that the guarantee
+
+    .. math:: f(x_n)-f_\\star \\leqslant \\tau(n, L, \\mu, \\gamma) (f(x_0) - f_\\star)
+
+    is valid, where :math:`x_n` is the n-th iterate obtained with the gradient method with fixed step-size.
+
+    **Algorithm**:
+    Gradient descent is described as follows, for :math:`t \\in \\{ 0, \\dots, n-1\\}`,
+
+    .. math:: x_{t+1} = x_t - \\gamma \\nabla f(x_t),
+
+    where :math:`\\gamma` is the step-size.
+
+    **Theoretical guarantee**: We compare with the guarantees from [4, Theorem 3].
+
+    **References**:
+
+    `[1] S. Lojasiewicz (1963).
+    Une propriété topologique des sous-ensembles analytiques réels.
+    Les équations aux dérivées partielles, 117 (1963), 87–89.
+    `_
+
+    `[2] B. Polyak (1963).
+    Gradient methods for the minimisation of functionals.
+    USSR Computational Mathematics and Mathematical Physics 3(4), 864–878.
+    `_
+
+    `[3] J. Bolte, A. Daniilidis, and A. Lewis (2007).
+    The Łojasiewicz inequality for nonsmooth subanalytic functions with applications to subgradient dynamical systems.
+    SIAM Journal on Optimization 17, 1205–1223.
+    `_
+
+    `[4] H. Abbaszadehpeivasti, E. de Klerk, M. Zamani (2023).
+    Conditions for linear convergence of the gradient method for non-convex optimization.
+    Optimization Letters.
+    `_
+
+    `[5] A. Rubbens, J.M. Hendrickx, A. Taylor (2025).
+    A constructive approach to strengthen algebraic descriptions of function and operator classes (Version 1).
+    `_
+
+    Args:
+        L (float): the smoothness parameter.
+        mu (float): the Lojasiewicz parameter.
+        gamma (float): the step-size.
+        n (int): number of iterations.
+        wrapper (str): the name of the wrapper to be used.
+        solver (str): the name of the solver the wrapper should use.
+        verbose (int): level of information details to print.
+
+                        - -1: No verbose at all.
+                        - 0: This example's output.
+                        - 1: This example's output + PEPit information.
+                        - 2: This example's output + PEPit information + solver details.
+
+    Returns:
+        pepit_tau (float): worst-case value.
+        theoretical_tau (float): theoretical value.
+
+    Example:
+        >>> L, mu, gamma, n = 1, .2, 1, 1
+        >>> pepit_tau, theoretical_tau = wc_gradient_descent_quadratic_lojasiewicz_complexified(L=L, mu=mu, gamma=gamma, n=n, wrapper="cvxpy", solver=None, verbose=1)
+        (PEPit) Setting up the problem: size of the Gram matrix: 4x4
+        (PEPit) Setting up the problem: performance measure is the minimum of 1 element(s)
+        (PEPit) Setting up the problem: Adding initial conditions and general constraints ...
+        (PEPit) Setting up the problem: initial conditions and general constraints (1 constraint(s) added)
+        (PEPit) Setting up the problem: interpolation conditions for 1 function(s)
+                 Function 1 : Adding 4 scalar constraint(s) ...
+                 Function 1 : 4 scalar constraint(s) added
+                 Function 1 : Adding 6 lmi constraint(s) ...
+                 Size of PSD matrix 1: 4x4
+                 Size of PSD matrix 2: 4x4
+                 Size of PSD matrix 3: 4x4
+                 Size of PSD matrix 4: 4x4
+                 Size of PSD matrix 5: 4x4
+                 Size of PSD matrix 6: 4x4
+                 Function 1 : 6 lmi constraint(s) added
+        (PEPit) Setting up the problem: additional constraints for 0 function(s)
+        (PEPit) Compiling SDP
+        (PEPit) Calling SDP solver
+        (PEPit) Solver status: optimal (wrapper:cvxpy, solver: MOSEK); optimal value: 0.6832669556328734
+        (PEPit) Primal feasibility check:
+                The solver found a Gram matrix that is positive semi-definite
+                All required PSD matrices are indeed positive semi-definite up to an error of 1.0099203333404037e-09
+                All the primal scalar constraints are verified
+        (PEPit) Dual feasibility check:
+                The solver found a residual matrix that is positive semi-definite
+                All the dual matrices to lmi are positive semi-definite
+                All the dual scalar values associated with inequality constraints are nonnegative up to an error of 5.671954340368105e-10
+        (PEPit) The worst-case guarantee proof is perfectly reconstituted up to an error of 2.0306640495891322e-08
+        (PEPit) Final upper bound (dual): 0.6832669563172779 and lower bound (primal example): 0.6832669556328734
+        (PEPit) Duality gap: absolute: 6.844044220244427e-10 and relative: 1.0016647466735981e-09
+        *** Example file: worst-case performance of gradient descent with fixed step-size ***
+                *** (smooth problem satisfying a Lojasiewicz inequality; expensive version) ***
+        PEPit guarantee:        f(x_n) - f(x_*) <= 0.683267 (f(x_0)-f_*)
+        Theoretical guarantee:  f(x_n) - f(x_*) <= 0.727273 (f(x_0)-f_*)
+
+    """
+    # Instantiate PEP
+    problem = PEP()
+
+    # Declare a smooth function satisfying a quadratic Lojasiewicz inequality
+    func = problem.declare_function(SmoothQuadraticLojasiewiczFunctionComplexified, L=L, mu=mu)
+
+    # Start by defining its unique optimal point xs = x_* and corresponding function value fs = f_*
+    xs = func.stationary_point()
+    fs = func(xs)
+
+    # Then define the starting point x0 of the algorithm
+    x0 = problem.set_initial_point()
+
+    # Set the initial constraint that is the functional accuracy of x0: f(x0) - f_* <= 1
+    problem.set_initial_condition(func(x0) - fs <= 1)
+
+    # Run gradient descent
+    x = x0
+    for i in range(n):
+        g = func.gradient(x)
+        x = x - gamma * g
+
+    # Set up the performance measure
+    problem.set_performance_metric(func(x) - fs)
+
+    # Solve the PEP
+    pepit_verbose = max(verbose, 0)
+    pepit_tau = problem.solve(wrapper=wrapper, solver=solver, verbose=pepit_verbose)
+
+    # Compute the theoretical guarantee (see bounds in [4, Theorem 3]); None if gamma lies outside the covered range
+    m, mp = -L, mu
+    if 0 <= gamma <= 1 / L:
+        theoretical_tau = (mp * (1 - L * gamma) + np.sqrt(
+            (L - m) * (m - mp) * (2 - L * gamma) * mp * gamma + (L - m) ** 2) ** 2 / (L - m + mp) ** 2)
+    elif 1 / L <= gamma <= 3 / (m + L + np.sqrt(m ** 2 - L * m + L ** 2)):
+        theoretical_tau = ((L * gamma - 2) * (m * gamma - 2) * mp * gamma) / ((L + m - mp) * gamma - 2) + 1
+    elif 3 / (m + L + np.sqrt(m ** 2 - m * L + L ** 2)) <= gamma <= 2 / L:
+        theoretical_tau = (L * gamma - 1) ** 2 / ((L * gamma - 1) ** 2 + mp * gamma * (2 - L * gamma))
+    else:
+        theoretical_tau = None
+
+    if theoretical_tau is not None:
+        theoretical_tau = theoretical_tau ** n
+
+    # Print conclusion if required
+    if verbose != -1:
+        print('*** Example file: worst-case performance of gradient descent with fixed step-size ***')
+        print('*** \t (smooth problem satisfying a Lojasiewicz inequality; expensive version) ***')
+        print('\tPEPit guarantee:\t f(x_n) - f(x_*) <= {:.6} (f(x_0)-f_*)'.format(pepit_tau))
+        print('\tTheoretical guarantee:\t f(x_n) - f(x_*) <= {:.6} (f(x_0)-f_*)'.format(theoretical_tau))
+
+    # Return the worst-case guarantee of the evaluated method (and the reference theoretical value)
+    return pepit_tau, theoretical_tau
+
+
+if __name__ == "__main__":
+    L, mu, gamma, n = 1, .2, 1, 1
+    pepit_tau, theoretical_tau = wc_gradient_descent_quadratic_lojasiewicz_complexified(L=L, mu=mu, gamma=gamma, n=n,
+                                                                                        wrapper="cvxpy", solver=None,
+                                                                                        verbose=1)
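
The piecewise rate transcribed above from [4, Theorem 3] can be sanity-checked by hand at the parameters of the docstring example; a quick sketch of the first branch (0 <= gamma <= 1/L, with m = -L and mp = mu):

    import numpy as np

    # First branch of the transcription above, at the docstring's parameters.
    L, mu, gamma = 1., .2, 1.
    m, mp = -L, mu
    tau = mp * (1 - L * gamma) + np.sqrt(
        (L - m) * (m - mp) * (2 - L * gamma) * mp * gamma + (L - m) ** 2) ** 2 / (L - m + mp) ** 2
    print(tau)  # 0.7272727..., matching the 0.727273 reported in the docstring example
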
diff --git a/tests/additional_complexified_examples_tests/smooth_quadratic_lojasiewicz_function_super_expensive.py b/tests/additional_complexified_examples_tests/smooth_quadratic_lojasiewicz_function_super_expensive.py
new file mode 100644
index 00000000..f6af19cd
--- /dev/null
+++ b/tests/additional_complexified_examples_tests/smooth_quadratic_lojasiewicz_function_super_expensive.py
@@ -0,0 +1,190 @@
+import numpy as np
+
+from PEPit.function import Function
+from PEPit.expression import Expression
+from PEPit import PSDMatrix
+
+
+class SmoothQuadraticLojasiewiczFunctionComplexified(Function):
+    """
+    The :class:`SmoothQuadraticLojasiewiczFunctionComplexified` class overwrites the `add_class_constraints` method
+    of :class:`Function`, implementing constraints (which are necessary but a priori not sufficient for
+    interpolation) for the class of smooth (not necessarily convex) functions that also satisfy a quadratic
+    Lojasiewicz inequality (sometimes also referred to as a Polyak-Lojasiewicz inequality). Extensive descriptions
+    of such classes of functions can be found in [1, 2].
+
+    The conditions implemented here are the ones presented in [3, Proposition 3.4] (version 1 on arXiv),
+    which have since been improved, see [4, Proposition 3.4].
+
+    Warning:
+        Smooth functions satisfying a Lojasiewicz property do not enjoy known interpolation conditions.
+        The conditions implemented in this class are necessary but a priori not sufficient for interpolation.
+        Hence, the numerical results obtained when using this class might be non-tight upper bounds.
+
+    Attributes:
+        L (float): smoothness parameter
+        mu (float): quadratic Lojasiewicz parameter
+
+    Example:
+        >>> from PEPit import PEP
+        >>> from tests.additional_complexified_examples_tests import SmoothQuadraticLojasiewiczFunctionComplexified
+        >>> problem = PEP()
+        >>> h = problem.declare_function(function_class=SmoothQuadraticLojasiewiczFunctionComplexified, mu=.1, L=1.)
+
+    References:
+
+    `[1] S. Lojasiewicz (1963).
+    Une propriété topologique des sous-ensembles analytiques réels.
+    Les équations aux dérivées partielles, 117 (1963), 87–89.
+    `_
+
+    `[2] J. Bolte, A. Daniilidis, and A. Lewis (2007).
+    The Łojasiewicz inequality for nonsmooth subanalytic functions with applications to subgradient dynamical systems.
+    SIAM Journal on Optimization 17, 1205–1223.
+    `_
+
+    `[3] A. Rubbens, J.M. Hendrickx, A. Taylor (2025).
+    A constructive approach to strengthen algebraic descriptions of function and operator classes (Version 1).
+    `_
+
+    `[4] A. Rubbens, J.M. Hendrickx, A. Taylor (2025).
+    A constructive approach to strengthen algebraic descriptions of function and operator classes.
+    `_
+
+    """
+
+    def __init__(self,
+                 L,
+                 mu,
+                 is_leaf=True,
+                 decomposition_dict=None,
+                 reuse_gradient=True,
+                 name=None):
+        """
+
+        Args:
+            L (float): The smoothness parameter.
+            mu (float): The quadratic Lojasiewicz parameter.
+            is_leaf (bool): True if self is defined from scratch.
+                            False if self is defined as linear combination of leaf.
+            decomposition_dict (dict): Decomposition of self as linear combination of leaf :class:`Function` objects.
+                                       Keys are :class:`Function` objects and values are their associated coefficients.
+            reuse_gradient (bool): If True, the same subgradient is returned
+                                   when one requires it several times on the same :class:`Point`.
+                                   If False, a new subgradient is computed each time one is required.
+            name (str): name of the object. None by default. Can be updated later through the method `set_name`.
+
+        Note:
+            Smooth functions are necessarily differentiable, hence `reuse_gradient` is set to True.
+
+        """
+        super().__init__(is_leaf=is_leaf,
+                         decomposition_dict=decomposition_dict,
+                         reuse_gradient=True,
+                         name=name,
+                         )
+
+        assert 0 <= mu <= L
+
+        # Store L and mu
+        self.mu = mu
+        self.L = L
+
+    def set_quadratic_lojasiewicz_constraint_i_j(self,
+                                                 xi, gi, fi,
+                                                 xj, gj, fj,
+                                                 ):
+        """
+        Formulates necessary interpolation constraints for quadratic Lojasiewicz functions, see [3, Proposition 3.4].
+
+        """
+        constraint = (fi - fj <= 1 / (2 * self.mu) * gi ** 2)
+
+        return constraint
+
+    def set_upper_quadratic_lojasiewicz_constraint_i_j(self,
+                                                       xi, gi, fi,
+                                                       xj, gj, fj,
+                                                       ):
+        """
+        Formulates necessary interpolation constraints for smooth functions, see [3, Proposition 3.4].
+        The name refers to the fact that this inequality is very similar to the classical quadratic Lojasiewicz
+        inequality, in the reverse sense.
+
+        """
+        constraint = (fi - fj >= 1 / (2 * self.L) * gi ** 2)
+
+        return constraint
+
+    def add_class_constraints(self):
+        """
+        Formulates a list of necessary conditions for interpolation of self (smooth Lojasiewicz functions),
+        see, e.g., discussions around [3, Proposition 3.4].
+
+        """
+        # Check that there is at least one stationary point encoded; if not, create one.
+        if self.list_of_stationary_points == list():
+            self.stationary_point()
+
+        self.add_constraints_from_two_lists_of_points(list_of_points_1=self.list_of_points,
+                                                      list_of_points_2=self.list_of_stationary_points,
+                                                      constraint_name="quadratic_lojasiewicz",
+                                                      set_class_constraint_i_j=self.set_quadratic_lojasiewicz_constraint_i_j,
+                                                      )
+
+        self.add_constraints_from_two_lists_of_points(list_of_points_1=self.list_of_points,
+                                                      list_of_points_2=self.list_of_stationary_points,
+                                                      constraint_name="upper_quadratic_lojasiewicz",
+                                                      set_class_constraint_i_j=self.set_upper_quadratic_lojasiewicz_constraint_i_j,
+                                                      )
+
+        # Set function ID
+        function_id = self.get_name()
+        if function_id is None:
+            function_id = "Function_{}".format(self.counter)
+
+        # Browse the list of points and create the necessary conditions for interpolation [3, Proposition 3.4]
+        _, _, fs = self.list_of_stationary_points[0]
+        for i, point_i in enumerate(self.list_of_points):
+
+            xi, gi, fi = point_i
+            xi_id = xi.get_name()
+            if xi_id is None:
+                xi_id = "Point_{}".format(i)
+
+            for j, point_j in enumerate(self.list_of_points):
+
+                xj, gj, fj = point_j
+                xj_id = xj.get_name()
+                if xj_id is None:
+                    xj_id = "Point_{}".format(j)
+
+                if point_i != point_j:
+                    A = - fi + fj + 1 / 2 * (gi + gj) * (xi - xj) + 1 / (4 * self.L) * (gi - gj) ** 2 - self.L / 4 * (
+                            xi - xj) ** 2
+                    B = (self.L + self.mu) * (fi - fs - 1 / (2 * self.L) * gi ** 2)
+                    C = (self.L - self.mu) * (fj - fs + 1 / (2 * self.L) * gj ** 2)
+
+                    D = 2 * self.mu * (B - C - (self.L + 3 * self.mu) * A) / (2 * self.L + self.mu)
+                    E = 4 * self.mu ** 2 * ((self.L + self.mu) * A - 2 * B) / (2 * self.L + self.mu) ** 2
+                    F = - 2 * self.mu * A - D - E - 8 * self.mu ** 3 * B / (2 * self.L + self.mu) ** 3
+
+                    M13 = Expression()
+                    M14 = Expression()
+                    M22 = - 6 * self.mu * A - D - 2 * M13
+                    M24 = Expression()
+                    M33 = - 6 * self.mu * A - 2 * D - E - 2 * M24
+
+                    T = np.array([[-2 * self.mu * A, 0, M13, M14],
+                                  [0, M22, -M14, M24],
+                                  [M13, -M14, M33, 0],
+                                  [M14, M24, 0, F]], dtype=Expression)
+
+                    psd_matrix = PSDMatrix(matrix_of_expressions=T,
+                                           name="IC_{}_{}({}, {})".format(function_id,
+                                                                          "smooth_quadratic_lojasiewicz_lmi",
+                                                                          xi_id, xj_id))
+                    self.list_of_class_psd.append(psd_matrix)
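
For reference, the per-pair cost gap between this "super expensive" description and the reformulation at the top of this patch, read off the two code paths (an illustrative tally only):

    # Per ordered pair (i, j) of points: PSD blocks and free Expression variables introduced.
    super_expensive = {"psd_blocks": [(4, 4)], "free_expressions": ["M13", "M14", "M24"]}
    expensive = {"psd_blocks": [(2, 2), (2, 2)], "free_expressions": ["Mt12", "Mt22"]}
    print(super_expensive, expensive)
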
diff --git a/tests/test_uselessly_complexified_examples.py b/tests/test_uselessly_complexified_examples.py
index 85b105e9..fb14f5a5 100644
--- a/tests/test_uselessly_complexified_examples.py
+++ b/tests/test_uselessly_complexified_examples.py
@@ -1,5 +1,7 @@
 import unittest
 
+from math import sqrt
+
 from PEPit.examples.unconstrained_convex_minimization import wc_proximal_point
 from PEPit.examples.composite_convex_minimization import wc_proximal_gradient
 from PEPit.examples.unconstrained_convex_minimization import wc_gradient_exact_line_search
@@ -8,6 +10,7 @@
     wc_randomized_coordinate_descent_smooth_strongly_convex
 from PEPit.examples.stochastic_and_randomized_convex_minimization import wc_randomized_coordinate_descent_smooth_convex
 from PEPit.examples.unconstrained_convex_minimization import wc_cyclic_coordinate_descent
+from PEPit.examples.nonconvex_optimization import wc_gradient_descent_quadratic_lojasiewicz_expensive as wc_gradient_lojasiewicz_a
 
 from tests.additional_complexified_examples_tests import wc_proximal_gradient_complexified
 from tests.additional_complexified_examples_tests import wc_proximal_gradient_complexified2
@@ -25,6 +28,8 @@
 from tests.additional_complexified_examples_tests import wc_gradient_descent_blocks
 from tests.additional_complexified_examples_tests import wc_cyclic_coordinate_descent_refined
 
+from tests.additional_complexified_examples_tests import wc_gradient_descent_quadratic_lojasiewicz_complexified as wc_gradient_lojasiewicz_b
+
 
 class TestExamples(unittest.TestCase):
 
@@ -189,3 +194,24 @@ def test_gradient_descent_blocks_one_block(self):
         wc_modified, theory = wc_gradient_descent_blocks(L=[L], n=n, verbose=self.verbose)
 
         self.assertAlmostEqual(wc_modified, theory, delta=self.relative_precision * theory)
+
+    def test_lojasiewicz(self):
+        L, mu, n = 1, .2, 3
+
+        gamma = 1 / L
+        wc_a, _ = wc_gradient_lojasiewicz_a(L, mu, gamma, n, verbose=self.verbose)
+        wc_b, _ = wc_gradient_lojasiewicz_b(L, mu, gamma, n, verbose=self.verbose)
+
+        self.assertAlmostEqual(wc_a, wc_b, delta=self.relative_precision * wc_a)
+
+        gamma = (1 + sqrt(3)) / (2 * L)
+        wc_a, _ = wc_gradient_lojasiewicz_a(L, mu, gamma, n, verbose=self.verbose)
+        wc_b, _ = wc_gradient_lojasiewicz_b(L, mu, gamma, n, verbose=self.verbose)
+
+        self.assertAlmostEqual(wc_a, wc_b, delta=self.relative_precision * wc_a)
+
+        gamma = (1 + sqrt(3) / 2) / L
+        wc_a, _ = wc_gradient_lojasiewicz_a(L, mu, gamma, n, verbose=self.verbose)
+        wc_b, _ = wc_gradient_lojasiewicz_b(L, mu, gamma, n, verbose=self.verbose)
+
+        self.assertAlmostEqual(wc_a, wc_b, delta=self.relative_precision * wc_a)
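
The new consistency test can also be run in isolation with the standard unittest machinery; a sketch, assuming a PEPit checkout with a working SDP solver:

    import unittest

    from tests.test_uselessly_complexified_examples import TestExamples

    # Run only the Lojasiewicz comparison test added by this patch.
    suite = unittest.TestSuite([TestExamples("test_lojasiewicz")])
    unittest.TextTestRunner(verbosity=2).run(suite)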