Calling Scripts

Below are example calling scripts used to populate specifications for each user function and libEnsemble before initiating libEnsemble via the primary libE() call. The primary libEnsemble-relevant portions have been highlighted in each example. Non-highlighted portions may include setup routines, compilation steps for user applications, or output processing. The first three scripts correspond to random sampling calculations, while the final one corresponds to an optimization routine.

Many other examples of calling scripts can be found in libEnsemble’s regression tests.

Local Sine Tutorial

This example is from the Local Sine Tutorial, meant to run with Python’s multiprocessing as the primary comms method.

examples/tutorials/simple_sine/test_local_sine_tutorial.py
 1import numpy as np
 2from gest_api.vocs import VOCS
 3from sine_gen_std import RandomSample
 4from sine_sim import sim_find_sine
 5
 6from libensemble import Ensemble
 7from libensemble.specs import ExitCriteria, GenSpecs, LibeSpecs, SimSpecs
 8
 9if __name__ == "__main__":  # Python-quirk required on macOS and windows
10    libE_specs = LibeSpecs(nworkers=4, comms="local")
11
12    vocs = VOCS(variables={"x": [-3, 3]}, objectives={"y": "EXPLORE"})  # Configure our generator with this object
13
14    generator = RandomSample(vocs)  # Instantiate our generator
15
16    gen_specs = GenSpecs(
17        generator=generator,  # Pass our generator and config to libEnsemble
18        vocs=vocs,
19        batch_size=4,
20    )
21
22    sim_specs = SimSpecs(
23        sim_f=sim_find_sine,  # Our simulator function
24        inputs=["x"],  # InputArray field names. "x" from gen_f output
25        out=[("y", float)],  # sim_f output. "y" = sine("x")
26    )  # sim_specs_end_tag
27
28    exit_criteria = ExitCriteria(sim_max=80)  # Stop libEnsemble after 80 simulations
29
30    ensemble = Ensemble(sim_specs, gen_specs, exit_criteria, libE_specs)
31    ensemble.add_random_streams()  # setup the random streams unique to each worker
32    ensemble.run()  # start the ensemble. Blocks until completion.
33
34    history = ensemble.H  # start visualizing our results
35
36    print([i for i in history.dtype.fields])  # (optional) to visualize our history array
37    print(history)
38
39    import matplotlib.pyplot as plt
40
41    colors = ["b", "g", "r", "y", "m", "c", "k", "w"]
42
43    for i in range(1, libE_specs.nworkers + 1):
44        worker_xy = np.extract(history["sim_worker"] == i, history)
45        x = [entry.tolist()[0] for entry in worker_xy["x"]]
46        y = [entry for entry in worker_xy["y"]]
47        plt.scatter(x, y, label="Worker {}".format(i), c=colors[i - 1])
48
49    plt.title("Sine calculations for a uniformly sampled random distribution")
50    plt.xlabel("x")
51    plt.ylabel("sine(x)")
52    plt.legend(loc="lower right")
53    plt.savefig("tutorial_sines.png")

Electrostatic Forces with Executor

These examples are from a test for evaluating the scaling capabilities of libEnsemble by calculating particle electrostatic forces through a user application. This application is registered with the MPIExecutor, then submitted for execution in the sim_f. Note the use of parse_args=True, which allows arguments such as the number of workers to be read from the command line.

Traditional Version

Run using five workers with:

python run_libe_forces.py -n 5

One worker runs a persistent generator and the other four run the forces simulations.

tests/scaling_tests/forces/forces_simple/run_libe_forces.py
 1#!/usr/bin/env python
 2import os
 3import sys
 4
 5import numpy as np
 6from forces_simf import run_forces  # Sim func from current dir
 7
 8from libensemble import Ensemble
 9from libensemble.alloc_funcs.start_only_persistent import only_persistent_gens as alloc_f
10from libensemble.executors import MPIExecutor
11from libensemble.gen_funcs.persistent_sampling import persistent_uniform as gen_f
12from libensemble.specs import AllocSpecs, ExitCriteria, GenSpecs, LibeSpecs, SimSpecs
13
14if __name__ == "__main__":
15    # Initialize MPI Executor
16    exctr = MPIExecutor()
17
18    # Register simulation executable with executor
19    sim_app = os.path.join(os.getcwd(), "../forces_app/forces.x")
20
21    if not os.path.isfile(sim_app):
22        sys.exit("forces.x not found - please build first in ../forces_app dir")
23
24    exctr.register_app(full_path=sim_app, app_name="forces")
25
26    # Parse number of workers, comms type, etc. from arguments
27    ensemble = Ensemble(parse_args=True, executor=exctr)
28    nsim_workers = ensemble.nworkers - 1  # One worker is for persistent generator
29
30    # Persistent gen does not need resources
31    ensemble.libE_specs = LibeSpecs(
32        num_resource_sets=nsim_workers,
33        sim_dirs_make=True,
34    )
35
36    ensemble.sim_specs = SimSpecs(
37        sim_f=run_forces,
38        inputs=["x"],
39        outputs=[("energy", float)],
40    )
41
42    ensemble.gen_specs = GenSpecs(
43        gen_f=gen_f,
44        inputs=[],  # No input when start persistent generator
45        persis_in=["sim_id"],  # Return sim_ids of evaluated points to generator
46        outputs=[("x", float, (1,))],
47        user={
48            "initial_batch_size": nsim_workers,
49            "lb": np.array([1000]),  # min particles
50            "ub": np.array([3000]),  # max particles
51        },
52    )
53
54    # Starts one persistent generator. Simulated values are returned in batch.
55    ensemble.alloc_specs = AllocSpecs(
56        alloc_f=alloc_f,
57        user={
58            "async_return": False,  # False causes batch returns
59        },
60    )
61
62    # Instruct libEnsemble to exit after this many simulations
63    ensemble.exit_criteria = ExitCriteria(sim_max=8)
64
65    # Seed random streams for each worker, particularly for gen_f
66    ensemble.add_random_streams()
67
68    # Run ensemble
69    ensemble.run()
70
71    if ensemble.is_manager:
72        # Note, this will change if changing sim_max, nworkers, lb, ub, etc.
73        print(f'Final energy checksum: {np.sum(ensemble.H["energy"])}')

Object + YAML Version

tests/scaling_tests/forces/forces_adv/run_libe_forces_from_yaml.py
 1#!/usr/bin/env python
 2import os
 3import sys
 4
 5import numpy as np
 6
 7from libensemble.ensemble import Ensemble
 8from libensemble.executors.mpi_executor import MPIExecutor
 9from libensemble.tools import add_unique_random_streams
10
11####################
12
13sim_app = os.path.join(os.getcwd(), "../forces_app/forces.x")
14
15if not os.path.isfile(sim_app):
16    sys.exit("forces.x not found - please build first in ../forces_app dir")
17
18
19####################
20
21forces = Ensemble(parse_args=True)
22forces.from_yaml("forces.yaml")
23
24forces.logger.set_level("INFO")
25
26if forces.is_manager:
27    print(f"\nRunning with {forces.nworkers} workers\n")
28
29exctr = MPIExecutor()
30exctr.register_app(full_path=sim_app, app_name="forces")
31
32forces.libE_specs["ensemble_dir_path"] = "./ensemble"
33forces.gen_specs.user.update(
34    {
35        "lb": np.array([0]),
36        "ub": np.array([32767]),
37    }
38)
39
40forces.persis_info = add_unique_random_streams({}, forces.nworkers + 1)
41
42forces.run()
43forces.save_output(__file__)
tests/scaling_tests/forces/forces_adv/forces.yaml
 1libE_specs:
 2    save_every_k_gens: 1000
 3    sim_dirs_make: True
 4    profile: False
 5
 6exit_criteria:
 7    sim_max: 8
 8
 9sim_specs:
10    sim_f: forces_simf.run_forces
11    inputs:
12        - x
13    outputs:
14        energy:
15            type: float
16
17    user:
18        keys:
19            - seed
20        cores: 1
21        sim_particles: 1.e+3
22        sim_timesteps: 5
23        sim_kill_minutes: 10.0
24        particle_variance: 0.2
25        kill_rate: 0.5
26        fail_on_sim: False
27        fail_on_submit: False
28
29gen_specs:
30    gen_f: libensemble.gen_funcs.sampling.uniform_random_sample
31    outputs:
32        x:
33            type: float
34            size: 1
35    user:
36        gen_batch_size: 1000
37
38alloc_specs:
39    alloc_f: libensemble.alloc_funcs.give_sim_work_first.give_sim_work_first
40    outputs:
41        allocated:
42            type: bool
43    user:
44        batch_mode: True
45        num_active_gens: 1

Persistent APOSMM with Gradients

This example is also from the regression tests and demonstrates configuring a persistent run via a custom allocation function.

tests/regression_tests/test_persistent_aposmm_with_grad.py
  1"""
  2Runs libEnsemble with APOSMM with an NLopt local optimizer that uses gradient
  3information from the sim_f
  4
  5Execute via one of the following commands (e.g. 3 workers):
  6   mpiexec -np 4 python test_persistent_aposmm_with_grad.py
  7   python test_persistent_aposmm_with_grad.py --nworkers 3
  8   python test_persistent_aposmm_with_grad.py --nworkers 3 --comms tcp
  9
 10When running with the above commands, the number of concurrent evaluations of
 11the objective function will be 2, as one of the three workers will be the
 12persistent generator.
 13"""
 14
 15# Do not change these lines - they are parsed by run-tests.sh
 16# TESTSUITE_COMMS: local mpi tcp
 17# TESTSUITE_NPROCS: 4
 18# TESTSUITE_EXTRA: true
 19
 20import multiprocessing
 21import sys
 22from math import gamma, pi, sqrt
 23
 24import libensemble.gen_funcs
 25import numpy as np
 26
 27# Import libEnsemble items for this test
 28from libensemble.libE import libE
 29from libensemble.sim_funcs.six_hump_camel import six_hump_camel as sim_f
 30from libensemble.sim_funcs.six_hump_camel import six_hump_camel_func, six_hump_camel_grad
 31
 32libensemble.gen_funcs.rc.aposmm_optimizers = "nlopt"
 33from time import time
 34
 35from libensemble.alloc_funcs.persistent_aposmm_alloc import persistent_aposmm_alloc as alloc_f
 36from libensemble.gen_funcs.persistent_aposmm import aposmm as gen_f
 37from libensemble.tests.regression_tests.support import six_hump_camel_minima as minima
 38from libensemble.tools import add_unique_random_streams, parse_args, save_libE_output
 39
 40# Main block is necessary only when using local comms with spawn start method (default on macOS and Windows).
 41if __name__ == "__main__":
 42    multiprocessing.set_start_method("fork", force=True)
 43
 44    nworkers, is_manager, libE_specs, _ = parse_args()
 45
 46    if is_manager:
 47        start_time = time()
 48
 49    if nworkers < 2:
 50        sys.exit("Cannot run with a persistent worker if only one worker -- aborting...")
 51
 52    n = 2
 53    sim_specs = {
 54        "sim_f": sim_f,
 55        "in": ["x"],
 56        "out": [("f", float), ("grad", float, n)],
 57    }
 58
 59    gen_out = [
 60        ("x", float, n),
 61        ("x_on_cube", float, n),
 62        ("sim_id", int),
 63        ("local_min", bool),
 64        ("local_pt", bool),
 65    ]
 66
 67    gen_in = ["x", "f", "grad", "local_pt", "sim_id", "sim_ended", "x_on_cube", "local_min"]
 68
 69    gen_specs = {
 70        "gen_f": gen_f,
 71        "in": gen_in,
 72        "persis_in": gen_in,
 73        "out": gen_out,
 74        "user": {
 75            "initial_sample_size": 0,  # Don't need to do evaluations because the sampling already done below
 76            "localopt_method": "LD_MMA",
 77            "rk_const": 0.5 * ((gamma(1 + (n / 2)) * 5) ** (1 / n)) / sqrt(pi),
 78            "stop_after_k_minima": 15,
 79            "xtol_rel": 1e-6,
 80            "ftol_rel": 1e-6,
 81            "max_active_runs": 6,
 82            "lb": np.array([-3, -2]),
 83            "ub": np.array([3, 2]),
 84        },
 85    }
 86
 87    alloc_specs = {"alloc_f": alloc_f}
 88
 89    persis_info = add_unique_random_streams({}, nworkers + 1)
 90
 91    exit_criteria = {"sim_max": 1000}
 92
 93    # Load in "already completed" set of 'x','f','grad' values to give to libE/persistent_aposmm
 94    sample_size = len(minima)
 95
 96    H0_dtype = [
 97        ("x", float, n),
 98        ("grad", float, n),
 99        ("sim_id", int),
100        ("x_on_cube", float, n),
101        ("sim_ended", bool),
102        ("f", float),
103        ("gen_informed", bool),
104        ("sim_started", bool),
105    ]
106    H0 = np.zeros(sample_size, dtype=H0_dtype)
107
108    # Two points in the following sample have the same best function value, which
109    # tests the corner case for some APOSMM logic
110    H0["x"] = np.round(minima, 1)
111    H0["x_on_cube"] = (H0["x"] - gen_specs["user"]["lb"]) / (gen_specs["user"]["ub"] - gen_specs["user"]["lb"])
112    H0["sim_id"] = range(sample_size)
113    H0[["sim_started", "gen_informed", "sim_ended"]] = True
114
115    for i in range(sample_size):
116        H0["f"][i] = six_hump_camel_func(H0["x"][i])
117        H0["grad"][i] = six_hump_camel_grad(H0["x"][i])
118
119    # Perform the run
120    H, persis_info, flag = libE(sim_specs, gen_specs, exit_criteria, persis_info, alloc_specs, libE_specs, H0=H0)
121
122    if is_manager:
123        assert persis_info[1].get("run_order"), "Run_order should have been given back"
124        assert (
125            len(persis_info[1]["run_order"]) >= gen_specs["user"]["stop_after_k_minima"]
126        ), "This test should have many runs started."
127        assert len(H) < exit_criteria["sim_max"], "Test should have stopped early due to 'stop_after_k_minima'"
128
129        print("[Manager]:", H[np.where(H["local_min"])]["x"])
130        print("[Manager]: Time taken =", time() - start_time, flush=True)
131
132        tol = 1e-5
133        for m in minima:
134            # The minima are known on this test problem.
135            # We use their values to test APOSMM has identified all minima
136            print(np.min(np.sum((H[H["local_min"]]["x"] - m) ** 2, 1)), flush=True)
137            assert np.min(np.sum((H[H["local_min"]]["x"] - m) ** 2, 1)) < tol
138
139        save_libE_output(H, persis_info, __file__, nworkers)