Merge pull request #5 from pik-gane/parallelization
Added an option to specify the number of CPUs to use for parallelization.
JacobLoe authored Mar 4, 2022
2 parents 6802632 + 13e4455 commit 7839411
Showing 2 changed files with 13 additions and 6 deletions.
src/pyoptes/optimization/budget_allocation/target_function.py (12 changes: 10 additions, 2 deletions)
@@ -163,7 +163,8 @@ def evaluate(budget_allocation,
              n_simulations=1,
              aggregation=n_infected_animals,
              statistic=mean_square_and_stderr,
-             parallel=False):
+             parallel=False,
+             num_cpu_cores=2):
     """Run the SIModelOnTransmissions a single time, using the given budget
     allocation, and return the number of nodes infected at the time the
     simulation is stopped. Since the simulated process is a stochastic
@@ -177,6 +178,7 @@
     - lambda a: np.percentile(a, 95)
     - lambda a: np.mean(a**2)
+    @param num_cpu_cores: int, specifies the number of CPUs used for parallelization. Use -1 to use all available CPUs.
     @param aggregation: any function converting an array of infection bools into an aggregated "damage"
     @param parallel: (bool) Sets whether the simulation runs are computed in parallel. Default is False.
     @param budget_allocation: (array of floats) expected number of tests per
@@ -198,7 +200,13 @@
     model.daily_test_probabilities = budget_allocation / 365

     if parallel:
-        with Pool(cpu_count()) as pool:
+        # check whether the requested number of CPUs is available
+        if num_cpu_cores > cpu_count():
+            num_cpu_cores = cpu_count()
+        # use all available CPUs
+        elif num_cpu_cores == -1:
+            num_cpu_cores = cpu_count()
+        with Pool(num_cpu_cores) as pool:
             results = pool.map(partial(task, aggregation=aggregation), range(n_simulations))
     else:
         results = [task(sim, aggregation) for sim in range(n_simulations)]
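The worker-count handling added above can be exercised on its own. Below is a minimal sketch, assuming only the Python standard library; resolve_num_cpu_cores and square are hypothetical names used for illustration and are not part of pyoptes:

from multiprocessing import Pool, cpu_count

def resolve_num_cpu_cores(num_cpu_cores):
    # mirror the behaviour of the new evaluate() argument:
    # -1 means "use every core", and requests above cpu_count() are capped
    if num_cpu_cores == -1 or num_cpu_cores > cpu_count():
        return cpu_count()
    return num_cpu_cores

def square(x):
    # stand-in for the per-simulation task
    return x * x

if __name__ == '__main__':
    with Pool(resolve_num_cpu_cores(-1)) as pool:
        print(pool.map(square, range(8)))  # [0, 1, 4, 9, 16, 25, 36, 49]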
src/test_parallelization.py (7 changes: 3 additions, 4 deletions)
@@ -17,8 +17,7 @@
 fp.prepare(
     n_nodes=n_nodes,  # instead of 60000, since this should suffice in the beginning
     capacity_distribution=np.random.lognormal,  # this is more realistic than a uniform distribution
-    delta_t_symptoms=60,  # instead of 30, since this gave a clearer picture in Sara's simulations
-    parallel=True)
+    delta_t_symptoms=60)  # instead of 30, since this gave a clearer picture in Sara's simulations

total_budget = 1.0 * n_nodes

@@ -38,7 +37,7 @@

 a = time()
 # y = np.mean([fp.evaluate(x, n_simulations=n_simulations) for _ in range(100)])
-y = fp.evaluate(x, n_simulations=n_simulations, parallel=True)
+y = fp.evaluate(x, n_simulations=n_simulations, parallel=True, num_cpu_cores=-1)
 b = time()-a
 print('Parallel simulation')
 print(f'Time for {n_simulations} simulations of: {b}')
@@ -57,7 +56,7 @@
     tl.append(time() - a)

     a = time()
-    ylp.append(fp.evaluate(x, n_simulations=s, parallel=True))
+    ylp.append(fp.evaluate(x, n_simulations=s, parallel=True, num_cpu_cores=-1))
     tlp.append(time() - a)

 plt.plot(n, yl, label='y non-parallel')
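For reference, a condensed end-to-end usage sketch of the new parameter. The import alias fp, the node count, and the uniform allocation x are assumptions for illustration and are not taken verbatim from the repository:

import numpy as np
from time import time
# assumed import alias; the test script refers to the prepared module as fp
from pyoptes.optimization.budget_allocation import target_function as fp

if __name__ == '__main__':  # guard the entry point, since evaluate() may spawn worker processes
    n_nodes = 120  # illustrative network size
    fp.prepare(n_nodes=n_nodes,
               capacity_distribution=np.random.lognormal,
               delta_t_symptoms=60)

    x = np.full(n_nodes, 1.0)  # uniform budget: one expected test per node and year
    a = time()
    y = fp.evaluate(x, n_simulations=100, parallel=True, num_cpu_cores=-1)
    print('aggregated damage:', y)
    print('wall time:', time() - a)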
