diff --git a/kernel_tuner/runners/sequential.py b/kernel_tuner/runners/sequential.py
index b4fc18c5..3ee43be0 100644
--- a/kernel_tuner/runners/sequential.py
+++ b/kernel_tuner/runners/sequential.py
@@ -125,7 +125,7 @@ def run(self, parameter_space, tuning_options):
         return results
 
     def config_in_cache(self, x_int, tuning_options):
-        if self.cache_manager:
+        if self.cache_manager and tuning_options.strategy_options['check_and_retrieve']:
             return ray.get(self.cache_manager.check_and_retrieve.remote(x_int))
         elif tuning_options.cache and x_int in tuning_options.cache:
             return tuning_options.cache[x_int]
diff --git a/kernel_tuner/strategies/brute_force.py b/kernel_tuner/strategies/brute_force.py
index b08efea0..ac5ae985 100644
--- a/kernel_tuner/strategies/brute_force.py
+++ b/kernel_tuner/strategies/brute_force.py
@@ -9,6 +9,7 @@ def tune(searchspace: Searchspace, runner, tuning_options):
     if isinstance(runner, ParallelRunner):
+        tuning_options.strategy_options['check_and_retrieve'] = False
         cache_manager = CacheManager.remote(tuning_options.cache, tuning_options.cachefile)
         return runner.run(parameter_space=searchspace.sorted_list(), tuning_options=tuning_options, cache_manager=cache_manager)
     else:
diff --git a/kernel_tuner/strategies/ensemble.py b/kernel_tuner/strategies/ensemble.py
index 4c16b4f8..2a19f9f7 100644
--- a/kernel_tuner/strategies/ensemble.py
+++ b/kernel_tuner/strategies/ensemble.py
@@ -66,6 +66,7 @@ def tune(searchspace: Searchspace, runner, tuning_options, cache_manager=None, a
     if 'bayes_opt' in ensemble: # All strategies start from a random sample except for BO
         tuning_options.strategy_options["samplingmethod"] = 'random'
         tuning_options.strategy_options["max_fevals"] = options.get("max_fevals", 100 * ensemble_size)
+        tuning_options.strategy_options['check_and_retrieve'] = True
     if num_devices < ensemble_size:
         warnings.warn("Number of devices is less than the number of strategies in the ensemble. "
                       "Some strategies will wait until devices are available.", UserWarning)