diff --git a/src/gemseo/algos/doe/base_doe_library.py b/src/gemseo/algos/doe/base_doe_library.py
index 6e829040308490634d0e25bd0e32e43e21d65ff8..81bddcbe409e5f1fbf8211cc022dcd4df1587720 100644
--- a/src/gemseo/algos/doe/base_doe_library.py
+++ b/src/gemseo/algos/doe/base_doe_library.py
@@ -286,8 +286,8 @@ class BaseDOELibrary(BaseDriverLibrary, Serializable):
                 callback(index, result)
             except ValueError:  # noqa: PERF203
                 LOGGER.exception(
-                    "Problem with evaluation of sample:"
-                    "%s result is not taken into account in DOE.",
+                    "The evaluation of the functions at point %s raised a"
+                    " ValueError; skipping to the next point.",
                     input_value,
                 )
 
diff --git a/tests/algos/doe/test_doe_lib.py b/tests/algos/doe/test_doe_lib.py
index 592ca98e5b6e079d1827564b7765cafdb00c0a67..611ae91e3f39d50f406580c394b6a9eeaa75bde3 100644
--- a/tests/algos/doe/test_doe_lib.py
+++ b/tests/algos/doe/test_doe_lib.py
@@ -543,3 +543,49 @@ def test_eval_func_and_eval_jac(eval_func, eval_jac):
     name = "pow2"
     assert (name in last_item) is eval_func
     assert (database.get_gradient_name(name) in last_item) is eval_jac
+
+
+class _DummyDiscValueError(Discipline):
+    default_grammar_type = Discipline.GrammarType.SIMPLE
+
+    def __init__(self) -> None:
+        super().__init__("foo")
+        self.input_grammar.update_from_names("x")
+        self.output_grammar.update_from_names(("z", "t"))
+        self.output_grammar.update_from_types({
+            "z": ndarray,
+            "t": ndarray,
+        })
+
+    def _run(self, input_data: StrKeyMapping):
+        x = input_data["x"]
+        if x < 0:
+            msg = "The sample is undefined for x < 0."
+            raise ValueError(msg)
+        return {"z": array([sum(x)]), "t": 2 * x + 3}
+
+
+@pytest.mark.parametrize("formulation", ["MDF", "DisciplinaryOpt", "IDF"])
+def test_value_error_filtering(formulation, caplog):
+    """Test that the DOELibrary can skip a sample that raises a ``ValueError``."""
+    caplog.set_level("ERROR")
+    design_space = DesignSpace()
+    design_space.add_variable("x", lower_bound=-1.0, upper_bound=1.0)
+
+    scenario = create_scenario(
+        [_DummyDiscValueError()],
+        "z",
+        design_space,
+        scenario_type="DOE",
+        formulation_name=formulation,
+    )
+    custom_doe_settings = CustomDOE_Settings(samples=array([[1.0], [-1.0], [0.0]]))
+    scenario.execute(custom_doe_settings)
+    assert len(scenario.formulation.optimization_problem.database) == 2
+    driver_message = (
+        "The evaluation of the functions at point [-1.] raised a ValueError; "
+        "skipping to the next point."
+    )
+    assert driver_message in caplog.text
+    discipline_message = "The sample is undefined for x < 0."
+    assert discipline_message in caplog.text
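
For reviewers who want to reproduce the new behaviour outside pytest, here is a minimal standalone sketch. It only reuses calls that already appear in the patch (create_scenario, DesignSpace.add_variable, CustomDOE_Settings, scenario.execute); the import paths and the PartiallyDefinedDiscipline helper are assumptions for illustration, not part of the patch.

# Minimal sketch (not part of the patch): exercising the new ValueError
# handling from a plain script. Import paths are assumptions based on
# GEMSEO's public layout; PartiallyDefinedDiscipline is a hypothetical
# helper mirroring _DummyDiscValueError from the test above.
from numpy import array, ndarray

from gemseo import create_scenario
from gemseo.algos.design_space import DesignSpace
from gemseo.core.discipline import Discipline
from gemseo.settings.doe import CustomDOE_Settings  # assumed import path
from gemseo.typing import StrKeyMapping


class PartiallyDefinedDiscipline(Discipline):
    """A discipline whose output is undefined for x < 0 (hypothetical)."""

    default_grammar_type = Discipline.GrammarType.SIMPLE

    def __init__(self) -> None:
        super().__init__("foo")
        self.input_grammar.update_from_names("x")
        self.output_grammar.update_from_types({"z": ndarray})

    def _run(self, input_data: StrKeyMapping):
        x = input_data["x"]
        if x < 0:
            raise ValueError("The sample is undefined for x < 0.")
        return {"z": x**2}


design_space = DesignSpace()
design_space.add_variable("x", lower_bound=-1.0, upper_bound=1.0)
scenario = create_scenario(
    [PartiallyDefinedDiscipline()],
    "z",
    design_space,
    scenario_type="DOE",
    formulation_name="DisciplinaryOpt",
)
# The second sample raises ValueError; with this patch the driver logs the
# new message and moves on, so only two of the three samples reach the
# database instead of the run aborting.
scenario.execute(CustomDOE_Settings(samples=array([[1.0], [-1.0], [0.0]])))
assert len(scenario.formulation.optimization_problem.database) == 2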