From 3ff0bebe7ee2413fe9e3bc56601bcf67705bf9ae Mon Sep 17 00:00:00 2001 From: stijn Date: Wed, 18 Jun 2025 15:12:45 +0200 Subject: [PATCH 1/3] Change `CostFunc` to return `+inf` when `objective_higher_is_better` Previously, `CostFunc` would return `-inf` when `objective_higher_is_better==True`. However, all search strategies implicitly assume that lower is better. --- kernel_tuner/strategies/common.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/kernel_tuner/strategies/common.py b/kernel_tuner/strategies/common.py index d01eae937..00700cd77 100644 --- a/kernel_tuner/strategies/common.py +++ b/kernel_tuner/strategies/common.py @@ -3,6 +3,7 @@ from time import perf_counter import numpy as np +import numbers from kernel_tuner import util from kernel_tuner.searchspace import Searchspace @@ -109,8 +110,15 @@ def __call__(self, x, check_restrictions=True): self.runner.last_strategy_start_time = perf_counter() # get numerical return value, taking optimization direction into account - return_value = result[self.tuning_options.objective] or sys.float_info.max - return_value = return_value if not self.tuning_options.objective_higher_is_better else -return_value + return_value = result[self.tuning_options.objective] + + if isinstance(return_value, numbers.Number): + if self.tuning_options.objective_higher_is_better: + # flip the sign if higher means better + return_value = -return_value + else: + # this is not a valid configuration, just return max + return_value = sys.float_info.max return return_value From c84227ddefb17f3c4d2fb191ec1c045dc2a738cd Mon Sep 17 00:00:00 2001 From: stijn Date: Fri, 20 Jun 2025 11:43:21 +0200 Subject: [PATCH 2/3] Remove check for `objective_higher_is_better==True` in simulated annealing, as it is already done by `CostFunc` --- kernel_tuner/strategies/simulated_annealing.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/kernel_tuner/strategies/simulated_annealing.py 
b/kernel_tuner/strategies/simulated_annealing.py index dce929b7b..71c8058d1 100644 --- a/kernel_tuner/strategies/simulated_annealing.py +++ b/kernel_tuner/strategies/simulated_annealing.py @@ -54,7 +54,7 @@ def tune(searchspace: Searchspace, runner, tuning_options): print(e) return cost_func.results - ap = acceptance_prob(old_cost, new_cost, T, tuning_options) + ap = acceptance_prob(old_cost, new_cost, T) r = random.random() if ap > r: @@ -85,9 +85,9 @@ def tune(searchspace: Searchspace, runner, tuning_options): tune.__doc__ = common.get_strategy_docstring("Simulated Annealing", _options) -def acceptance_prob(old_cost, new_cost, T, tuning_options): +def acceptance_prob(old_cost, new_cost, T): """Annealing equation, with modifications to work towards a lower value.""" - error_val = sys.float_info.max if not tuning_options.objective_higher_is_better else -sys.float_info.max + error_val = sys.float_info.max # if start pos is not valid, always move if old_cost == error_val: return 1.0 @@ -98,8 +98,6 @@ def acceptance_prob(old_cost, new_cost, T, tuning_options): if new_cost < old_cost: return 1.0 # maybe move if old cost is better than new cost depending on T and random value - if tuning_options.objective_higher_is_better: - return np.exp(((new_cost-old_cost)/new_cost)/T) return np.exp(((old_cost-new_cost)/old_cost)/T) From b3b94ec37c4805db56f3fb6f67ba469ca3bd9bd9 Mon Sep 17 00:00:00 2001 From: Ben van Werkhoven Date: Wed, 25 Jun 2025 12:25:24 +0200 Subject: [PATCH 3/3] fixing SA after fix in costfunc --- .../strategies/simulated_annealing.py | 22 ++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/kernel_tuner/strategies/simulated_annealing.py b/kernel_tuner/strategies/simulated_annealing.py index 71c8058d1..3a89ad54b 100644 --- a/kernel_tuner/strategies/simulated_annealing.py +++ b/kernel_tuner/strategies/simulated_annealing.py @@ -54,7 +54,7 @@ def tune(searchspace: Searchspace, runner, tuning_options): print(e) return cost_func.results 
- ap = acceptance_prob(old_cost, new_cost, T) + ap = acceptance_prob(old_cost, new_cost, T, tuning_options) r = random.random() if ap > r: @@ -85,20 +85,26 @@ def tune(searchspace: Searchspace, runner, tuning_options): tune.__doc__ = common.get_strategy_docstring("Simulated Annealing", _options) -def acceptance_prob(old_cost, new_cost, T): +def acceptance_prob(old_cost, new_cost, T, tuning_options): """Annealing equation, with modifications to work towards a lower value.""" error_val = sys.float_info.max + res = 0.0 # if start pos is not valid, always move if old_cost == error_val: - return 1.0 + res = 1.0 # if we have found a valid ps before, never move to nonvalid pos - if new_cost == error_val: - return 0.0 + elif new_cost == error_val: + res = 0.0 # always move if new cost is better - if new_cost < old_cost: - return 1.0 + elif new_cost < old_cost: + res = 1.0 # maybe move if old cost is better than new cost depending on T and random value - return np.exp(((old_cost-new_cost)/old_cost)/T) + else: + if tuning_options.objective_higher_is_better: + res = np.exp(((new_cost-old_cost)/new_cost)/T) + else: + res = np.exp(((old_cost-new_cost)/old_cost)/T) + return res def neighbor(pos, searchspace: Searchspace):