diff --git a/modelitool/combitabconvert.py b/modelitool/combitabconvert.py index 31d1551..b2feb3c 100644 --- a/modelitool/combitabconvert.py +++ b/modelitool/combitabconvert.py @@ -1,4 +1,5 @@ import datetime as dt +from pathlib import Path import pandas as pd @@ -33,7 +34,7 @@ def get_dymo_time_index(df): return list(pd.Series(sec_dt).cumsum()) -def df_to_combitimetable(df, filename): +def write_combitt_from_df(df: pd.DataFrame, file_path: Path | str): """ Write a text file compatible with modelica Combitimetables object from a Pandas DataFrame with a DatetimeIndex. DataFrames with non monotonically increasing @@ -45,10 +46,7 @@ def df_to_combitimetable(df, filename): """ if not isinstance(df, pd.DataFrame): raise ValueError(f"df must be an instance of pandas DataFrame. Got {type(df)}") - if not isinstance(df.index, pd.DatetimeIndex): - raise ValueError( - f"DataFrame index must be an instance of DatetimeIndex. " f"Got {type(df)}" - ) + if not df.index.is_monotonic_increasing: raise ValueError( "df DateTimeIndex is not monotonically increasing, this will" @@ -56,7 +54,7 @@ def df_to_combitimetable(df, filename): ) df = df.copy() - with open(filename, "w") as file: + with open(file_path, "w") as file: file.write("#1 \n") line = "" line += f"double table1({df.shape[0]}, {df.shape[1] + 1})\n" @@ -65,6 +63,7 @@ def df_to_combitimetable(df, filename): line += f"\t({i + 1}){col}" file.write(f"{line} \n") - df.index = datetime_to_seconds(df.index) + if isinstance(df.index, pd.DatetimeIndex): + df.index = datetime_to_seconds(df.index) file.write(df.to_csv(header=False, sep="\t", lineterminator="\n")) diff --git a/modelitool/corrai_connector.py b/modelitool/corrai_connector.py deleted file mode 100644 index 59d2d41..0000000 --- a/modelitool/corrai_connector.py +++ /dev/null @@ -1,137 +0,0 @@ -import numpy as np -import pandas as pd - -from corrai.base.parameter import Parameter - -from modelitool.simulate import OMModel - - -class ModelicaFunction: - """ - A class that defines a function based on a Modelitool Simulator. - - Args: - om_model (object): A fully configured Modelitool Simulator object. - param_list (list): A list of parameter defined as dictionaries. At least , each - parameter dict must have the following keys : "names", "interval". - indicators (list, optional): A list of indicators to be returned by the - function. An indicator must be one of the Simulator outputs. If not - provided, all indicators in the simulator's output list will be returned. - Default is None. - agg_methods_dict (dict, optional): A dictionary that maps indicator names to - aggregation methods. Each aggregation method should be a function that takes - an array of values and returns a single value. It can also be an error - function that will return an error indicator between the indicator results - and a reference array of values defined in reference_df. - If not provided, the default aggregation method for each indicator is - numpy.mean. Default is None. - reference_dict (dict, optional): When using an error function as agg_method, a - reference_dict must be used to map indicator names to reference indicator - names. The specified reference name will be used to locate the value in - reference_df. - If provided, the function will compute each indicator's deviation from its - reference indicator using the corresponding aggregation method. - Default is None. - reference_df (pandas.DataFrame, optional): A pandas DataFrame containing the - reference values for each reference indicator specified in reference_dict. 
- The DataFrame should have the same length as the simulation results. - Default is None. - custom_ind_dict (dict, optional): A dictionary that maps indicator names to - custom indicator information. Each custom indicator information should be - a dictionary containing the following keys: - - "depends_on": A list of indicator names that the custom function - depends on. They should be in output list of simulator - - "function": A function that computes the custom indicator values based - on the values of indicators specified in "depends_on". - If provided, the function will calculate custom indicators in addition - to regular indicators. Default is None. - - Returns: - pandas.Series: A pandas Series containing the function results. - The index is the indicator names and the values are the aggregated simulation - results. - - Raises: - ValueError: If reference_dict and reference_df are not both provided or both - None. - """ - - def __init__( - self, - om_model: OMModel, - param_list, - indicators=None, - agg_methods_dict=None, - reference_dict=None, - reference_df=None, - custom_ind_dict=None, - ): - self.om_model = om_model - self.param_list = param_list - if indicators is None: - self.indicators = om_model.get_available_outputs() - else: - self.indicators = indicators - if agg_methods_dict is None: - self.agg_methods_dict = {ind: np.mean for ind in self.indicators} - else: - self.agg_methods_dict = agg_methods_dict - if (reference_dict is not None and reference_df is None) or ( - reference_dict is None and reference_df is not None - ): - raise ValueError("Both reference_dict and reference_df should be provided") - self.reference_dict = reference_dict - self.reference_df = reference_df - self.custom_ind_dict = custom_ind_dict if custom_ind_dict is not None else [] - - def function(self, x_dict): - """ - Calculates the function values for the given input dictionary. - - Args: - - x_dict (dict): A dictionary of input values. - - Returns: - - res_series (Series): A pandas Series object containing - the function values with function names as indices. 
- """ - temp_dict = { - param[Parameter.NAME]: x_dict[param[Parameter.NAME]] - for param in self.param_list - } - self.om_model.set_param_dict(temp_dict) - res = self.om_model.simulate() - - function_results = {} - - # Calculate regular indicators - for ind in self.indicators: - if ind in res: - function_results[ind] = res[ind] - - # Calculate custom indicators - for ind in self.indicators: - if ind not in function_results and ind in self.custom_ind_dict: - ind_info = self.custom_ind_dict[ind] - if all(output in res for output in ind_info["depends_on"]): - custom_values = ind_info["function"]( - *[res[output] for output in ind_info["depends_on"]] - ) - function_results[ind] = custom_values - - # Aggregate the indicators - for ind in self.indicators: - if ind in function_results and ind in self.agg_methods_dict: - if self.reference_dict and ind in self.reference_dict: - ref_values = self.reference_df[self.reference_dict[ind]] - function_results[ind] = self.agg_methods_dict[ind]( - function_results[ind], ref_values - ) - - else: - function_results[ind] = self.agg_methods_dict[ind]( - function_results[ind] - ) - - res_series = pd.Series(function_results, dtype="float64") - return res_series diff --git a/modelitool/simulate.py b/modelitool/simulate.py index 47be737..7fe1d1c 100644 --- a/modelitool/simulate.py +++ b/modelitool/simulate.py @@ -1,63 +1,93 @@ +import datetime as dt import os import tempfile import warnings from pathlib import Path +import numpy as np import pandas as pd from OMPython import ModelicaSystem, OMCSessionZMQ from corrai.base.model import Model +from corrai.fmu import ( + datetime_index_to_seconds_index, + parse_simulation_times, + seconds_index_to_datetime_index, +) -from modelitool.combitabconvert import df_to_combitimetable, seconds_to_datetime +from sklearn.pipeline import Pipeline + +from modelitool.combitabconvert import write_combitt_from_df + +DEFAULT_SIMULATION_OPTIONS = { + "startTime": 0, + "stopTime": 24 * 3600, + "stepSize": 60, + "solver": "dassl", + "tolerance": 1e-6, + "outputFormat": "mat", +} class OMModel(Model): + """ + Wrap OpenModelica (via OMPython) in the corrai Model formalism. + + Parameters + ---------- + model_path : Path | str + Path to the Modelica model file. + simulation_options : dict, optional + Dictionary of simulation options including: + ``startTime``, ``stopTime``, ``stepSize``, ``tolerance``, + ``solver``, ``outputFormat``. + Can also include ``boundary`` (pd.DataFrame) if the model + uses a CombiTimeTable. + output_list : list of str, optional + List of variables to record during simulation. + simulation_path : Path, optional + Directory where simulation files will be written. + boundary_table : str or None, optional + Name of the CombiTimeTable object in the Modelica model + that is used to provide boundary conditions. + + - If a string is provided, boundary data can be passed through + ``simulation_options["boundary"]``. + - If None (default), no CombiTimeTable will be set and any + provided ``boundary`` will be ignored. + package_path : Path, optional + Path to the Modelica package directory (package.mo). + lmodel : list of str, optional + List of Modelica libraries to load. 
+ + Examples + -------- + >>> import pandas as pd + >>> from corrai.om import OMModel + >>> model = OMModel("MyModel.mo", output_list=["y"], boundary_table="Boundaries") + >>> x = pd.DataFrame({"y": [1, 2, 3]}, index=[0, 1, 2]) + >>> res = model.simulate(simulation_options={"boundary": x, "stepSize": 1}) + """ + def __init__( self, model_path: Path | str, - simulation_options: dict[str, float | str | int] = None, output_list: list[str] = None, - simulation_path: Path = None, - x_combitimetable_name: str = None, + simulation_dir: Path = None, + boundary_table_name: str | None = None, package_path: Path = None, lmodel: list[str] = None, ): - """ - A class to wrap ompython to simulate Modelica system. - Make it easier to change parameters values and simulation options. - Allows specification of boundary conditions using Pandas Dataframe. - The class inherits from corrai Model base class, and can be used with the - module. - - - model_path (Path | str): Path to the Modelica model file. - - simulation_options (dict[str, float | str | int], optional): - Options for the simulation. May include values for "startTime", - "stopTime", "stepSize", "tolerance", "solver", "outputFormat". - - output_list (list[str], optional): List of output variables. Default - will output all available variables. - - simulation_path (Path, optional): Path to run the simulation and - save the simulation results. - - x_combitimetable_name (str, optional): Name of the Modelica System - combi timetable object name, that is used to set the boundary condition. - - package_path (Path, optional): Path to the Modelica package directory - if necessary (package.mo). - - lmodel (list[str], optional): List of Modelica libraries to load. - """ + super().__init__(is_dynamic=True) + self.boundary_table_name = boundary_table_name + self.output_list = output_list - self.x_combitimetable_name = ( - x_combitimetable_name if x_combitimetable_name is not None else "Boundaries" - ) - self._simulation_path = ( - simulation_path if simulation_path is not None else Path(tempfile.mkdtemp()) + self.simulation_dir = ( + Path(tempfile.mkdtemp()) if simulation_dir is None else simulation_dir ) - if not os.path.exists(self._simulation_path): - os.mkdir(simulation_path) - - self._x = pd.DataFrame() - self.output_list = output_list self.omc = OMCSessionZMQ() - self.omc.sendExpression(f'cd("{self._simulation_path.as_posix()}")') + self.omc.sendExpression(f'cd("{self.simulation_dir.as_posix()}")') model_system_args = { "fileName": (package_path or model_path).as_posix(), @@ -65,147 +95,170 @@ def __init__( "lmodel": lmodel if lmodel is not None else [], "variableFilter": ".*" if output_list is None else "|".join(output_list), } - self.model = ModelicaSystem(**model_system_args) - if simulation_options is not None: - self._set_simulation_options(simulation_options) + self.property_dict = self.get_property_dict() def simulate( self, - parameter_dict: dict = None, + property_dict: dict[str, str | int | float] = None, simulation_options: dict = None, - x: pd.DataFrame = None, - verbose: bool = True, + solver_duplicated_keep: str = "last", + post_process_pipeline: Pipeline = None, simflags: str = None, - year: int = None, ) -> pd.DataFrame: """ - Runs the simulation with the provided parameters, simulation options and - boundary conditions. - - parameter_dict (dict, optional): Dictionary of parameters. - - simulation_options (dict, optional): May include values for "startTime", - "stopTime", "stepSize", "tolerance", "solver", "outputFormat". 
Can - also include 'x' with a DataFrame for boundary conditions. - - x (pd.DataFrame, optional): Input data for the simulation. Index shall - be a DatetimeIndex or integers. Columns must match the combitimetable - used to specify boundary conditions in the Modelica System. If 'x' is - provided both in simulation_options and as a direct parameter, the one - provided as direct parameter will be used. - - verbose (bool, optional): If True, prints simulation progress. Defaults to - True. - - simflags (str, optional): Additional simulation flags. - - year (int, optional): If x boundary conditions is not specified or do not - have a DateTime index (seconds int), a year can be specified to convert - int seconds index to a datetime index. If simulation spans overs several - years, it shall be the year when it begins. + Run an OpenModelica simulation and return results as a pandas DataFrame. + + Parameters + ---------- + property_dict : dict, optional + Dictionary of model parameters to update before simulation. + Keys must match Modelica parameter names. + simulation_options : dict, optional + Simulation options in the same format as in ``OMModel.__init__``. + If ``simulation_options["boundary"]`` is provided and the model has + a ``boundary_table`` name, the DataFrame is exported as a + CombiTimeTable-compatible text file and injected into the model. + simflags : str, optional + Additional simulator flags passed directly to OpenModelica. + + Returns + ------- + pandas.DataFrame + Simulation results indexed either by: + + - a timestamp index if a boundary table is used + (the year is inferred from ``boundary.index[0].year``), or + - integer seconds since the simulation start otherwise. + + The DataFrame columns include either: + - the variables listed in ``output_list``, or + - all variables produced by OpenModelica. + """ - if parameter_dict is not None: - self.set_param_dict(parameter_dict) + simu_property = self.property_dict.copy() + simu_property.update(dict(property_dict or {})) + + simulation_options = { + **DEFAULT_SIMULATION_OPTIONS, + **(simulation_options or {}), + } + + start, stop, step = ( + simulation_options.get(it, None) + for it in ["startTime", "stopTime", "stepSize"] + ) + + # Output step cannot be used in ompython + start_sec, stop_sec, step_sec, _ = parse_simulation_times( + start, stop, step, step + ) + om_simu_opt = simulation_options | { + "startTime": start_sec, + "stopTime": stop_sec, + "stepSize": step_sec, + } + + boundary_df = None + if simu_property: + boundary_df = simu_property.pop("boundary", boundary_df) - if simulation_options is not None: - if x is not None and "x" in simulation_options: + if simulation_options: + sim_boundary = om_simu_opt.pop("boundary", boundary_df) + + if boundary_df is None and sim_boundary is not None: + boundary_df = sim_boundary + elif boundary_df is not None and sim_boundary is not None: warnings.warn( - "Boundary file 'x' specified both in simulation_options and as a " - "direct parameter. The 'x' provided in simulate() will be used.", + "Boundary specified in both property_dict and " + "simulation_options. 
The one in property_dict will be used.", UserWarning, stacklevel=2, ) - self._set_simulation_options(simulation_options) + if boundary_df is not None: + boundary_df = boundary_df.copy() + if isinstance(boundary_df.index, pd.DatetimeIndex): + boundary_df.index = datetime_index_to_seconds_index(boundary_df.index) + + if not ( + boundary_df.index[0] <= start_sec <= boundary_df.index[-1] + and boundary_df.index[0] <= stop_sec <= boundary_df.index[-1] + ): + raise ValueError( + "'startTime' and 'stopTime' are outside boundary DataFrame" + ) - if x is not None: - self._set_x(x) + write_combitt_from_df(boundary_df, self.simulation_dir / "boundaries.txt") + full_path = (self.simulation_dir / "boundaries.txt").resolve().as_posix() + self.set_property_dict({f"{self.boundary_table_name}.fileName": full_path}) + self.model.setSimulationOptions(om_simu_opt) output_format = self.model.getSimulationOptions()["outputFormat"] result_file = "res.csv" if output_format == "csv" else "res.mat" self.model.simulate( - resultfile=(self._simulation_path / result_file).as_posix(), + resultfile=(self.simulation_dir / result_file).as_posix(), simflags=simflags, - verbose=verbose, ) if output_format == "csv": - res = pd.read_csv(self._simulation_path / "res.csv", index_col=0) + res = pd.read_csv(self.simulation_dir / "res.csv", index_col=0) if self.output_list is not None: res = res.loc[:, self.output_list] else: - if self.output_list is None: - var_list = list(self.model.getSolutions()) - else: - var_list = ["time"] + self.output_list - - res = pd.DataFrame( - data=self.model.getSolutions( - varList=var_list, - resultfile=(self._simulation_path / result_file).as_posix(), - ).T, - columns=var_list, + var_list = ["time"] + (self.output_list or list(self.model.getSolutions())) + raw = self.model.getSolutions( + varList=var_list, + resultfile=(self.simulation_dir / result_file).as_posix(), ) - res.set_index("time", inplace=True) - - res.index = pd.to_timedelta(res.index, unit="second") - res = res.resample( - f"{int(self.model.getSimulationOptions()['stepSize'])}s" - ).mean() - res.index = res.index.to_series().dt.total_seconds() - - if not self._x.empty: - res.index = seconds_to_datetime(res.index, self._x.index[0].year) - elif year is not None: - res.index = seconds_to_datetime(res.index, year) - else: - res.index = res.index.astype("int") - return res - - def save(self, file_path: Path): - pass + arr = np.atleast_2d(raw).T - def get_available_outputs(self): - if self.model.getSolutions() is None: - # A bit dirty but simulation must be run once so - # getSolutions() can access results - self.simulate(verbose=False) + _, unique_idx = np.unique(var_list, return_index=True) + var_list = [var_list[i] for i in sorted(unique_idx)] + arr = arr[:, sorted(unique_idx)] - return list(self.model.getSolutions()) + res = pd.DataFrame(arr, columns=var_list).set_index("time") - def get_parameters(self): - """ - Get parameters of the model or a loaded library. - Returns: - dict: Dictionary containing the parameters. 
- """ - return self.model.getParameters() + res = res.loc[~res.index.duplicated(keep=solver_duplicated_keep)] - def _set_simulation_options(self, simulation_options): - standard_options = { - "startTime": simulation_options.get("startTime"), - "stopTime": simulation_options.get("stopTime"), - "stepSize": simulation_options.get("stepSize"), - "tolerance": simulation_options.get("tolerance"), - "solver": simulation_options.get("solver"), - "outputFormat": simulation_options.get("outputFormat"), - } + if isinstance(start, (pd.Timestamp, dt.datetime)): + res.index = seconds_index_to_datetime_index(res.index, start.year) + res.index = res.index.round("s") + res = res.tz_localize(start.tz) + res.index.freq = res.index.inferred_freq + else: + res.index = round(res.index.to_series(), 2) - options = [f"{k}={v}" for k, v in standard_options.items() if v is not None] - self.model.setSimulationOptions(options) - self.simulation_options = simulation_options + if post_process_pipeline is not None: + res = post_process_pipeline.fit_transform(res) - if "x" in simulation_options: - self._set_x(simulation_options["x"]) + return res - def _set_x(self, df: pd.DataFrame): - """Sets the input data for the simulation and updates the corresponding file.""" - if not self._x.equals(df): - new_bounds_path = self._simulation_path / "boundaries.txt" - df_to_combitimetable(df, new_bounds_path) - full_path = (self._simulation_path / "boundaries.txt").resolve().as_posix() - self.set_param_dict({f"{self.x_combitimetable_name}.fileName": full_path}) - self._x = df + def get_property_values( + self, property_list: str | tuple[str, ...] | list[str] + ) -> list[str | int | float | None]: + if isinstance(property_list, str): + property_list = (property_list,) + return [self.model.getParameters(prop) for prop in property_list] + + # TODO Find a way to get output without simulation + # def get_available_outputs(self): + # try: + # sols = self.model.getSolutions() + # except ModelicaSystemError: + # self.simulate() + # sols = self.model.getSolutions() + # return list(sols) + + def get_property_dict(self): + return self.model.getParameters() - def set_param_dict(self, param_dict): - self.model.setParameters([f"{item}={val}" for item, val in param_dict.items()]) + def set_property_dict(self, property_dict): + self.model.setParameters( + [f"{item}={val}" for item, val in property_dict.items()] + ) def load_library(lib_path): diff --git a/requirements/install-min.txt b/requirements/install-min.txt index 39e5f05..31ddd5d 100644 --- a/requirements/install-min.txt +++ b/requirements/install-min.txt @@ -1,4 +1,4 @@ pandas>=1.5.0 numpy>=1.17.3 OMPython>=3.5.2 -corrai>=0.3.0 +corrai>=1.0..0 diff --git a/setup.py b/setup.py index 3bbe610..f6126f6 100644 --- a/setup.py +++ b/setup.py @@ -32,7 +32,7 @@ "pandas>=1.5.0", "numpy>=1.17.3", "OMPython>=3.5.2", - "corrai>=0.3.0", + "corrai>=1.0.0", ], packages=find_packages(exclude=["tests*"]), include_package_data=True, diff --git a/tests/test_combitabconvert.py b/tests/test_combitabconvert.py index 8c94a44..dcb4300 100644 --- a/tests/test_combitabconvert.py +++ b/tests/test_combitabconvert.py @@ -6,8 +6,8 @@ from modelitool.combitabconvert import ( datetime_to_seconds, - df_to_combitimetable, seconds_to_datetime, + write_combitt_from_df, ) @@ -19,15 +19,10 @@ def test_get_dymo_time_index(self): def test_df_to_combitimetable(self, tmpdir): with pytest.raises(ValueError): - df_to_combitimetable([1, 2, 3], tmpdir / "test.txt") + write_combitt_from_df([1, 2, 3], tmpdir / "test.txt") with 
pytest.raises(ValueError): - df_to_combitimetable( - pd.DataFrame(data=[1, 2, 3], index=[1, 2, 3]), tmpdir / "test.txt" - ) - - with pytest.raises(ValueError): - df_to_combitimetable( + write_combitt_from_df( pd.DataFrame( data=[1, 2, 3], index=pd.DatetimeIndex( @@ -59,7 +54,7 @@ def test_df_to_combitimetable(self, tmpdir): "10800.0\t0\t1\n" ) - df_to_combitimetable(df, tmpdir / "test.txt") + write_combitt_from_df(df, tmpdir / "test.txt") with open(tmpdir / "test.txt") as file: contents = file.read() diff --git a/tests/test_corrai_connector.py b/tests/test_corrai_connector.py deleted file mode 100644 index cf7467a..0000000 --- a/tests/test_corrai_connector.py +++ /dev/null @@ -1,155 +0,0 @@ -from pathlib import Path - -import pytest - -import numpy as np -import pandas as pd - -from corrai.base.parameter import Parameter - -from sklearn.metrics import mean_absolute_error, mean_squared_error - -from modelitool.corrai_connector import ModelicaFunction -from modelitool.simulate import OMModel - -PACKAGE_DIR = Path(__file__).parent / "TestLib" - - -PARAMETERS = [ - {Parameter.NAME: "x.k", Parameter.INTERVAL: (1.0, 3.0)}, - {Parameter.NAME: "y.k", Parameter.INTERVAL: (1.0, 3.0)}, -] - -agg_methods_dict = { - "res1.showNumber": mean_squared_error, - "res2.showNumber": mean_absolute_error, -} - -reference_dict = {"res1.showNumber": "meas1", "res2.showNumber": "meas2"} - - -X_DICT = {"x.k": 2, "y.k": 2} - -dataset = pd.DataFrame( - { - "meas1": [6, 2], - "meas2": [14, 1], - }, - index=pd.date_range("2023-01-01 00:00:00", freq="s", periods=2), -) - -expected_res = pd.DataFrame( - { - "meas1": [8.15, 8.15], - "meas2": [12.31, 12.31], - }, - index=pd.date_range("2023-01-01 00:00:00", freq="s", periods=2), -) - - -@pytest.fixture(scope="session") -def ommodel(tmp_path_factory): - simu_options = { - "startTime": 0, - "stopTime": 1, - "stepSize": 1, - "tolerance": 1e-06, - "solver": "dassl", - "outputFormat": "csv", - } - - outputs = ["res1.showNumber", "res2.showNumber"] - - simu = OMModel( - model_path="TestLib.ishigami_two_outputs", - package_path=PACKAGE_DIR / "package.mo", - simulation_options=simu_options, - output_list=outputs, - lmodel=["Modelica"], - ) - - return simu - - -class TestModelicaFunction: - def test_function_indicators(self, ommodel): - mf = ModelicaFunction( - om_model=ommodel, - param_list=PARAMETERS, - agg_methods_dict=agg_methods_dict, - indicators=["res1.showNumber", "res2.showNumber"], - reference_df=dataset, - reference_dict=reference_dict, - ) - - res = mf.function(X_DICT) - - np.testing.assert_allclose( - np.array([res["res1.showNumber"], res["res2.showNumber"]]), - np.array( - [ - mean_squared_error(expected_res["meas1"], dataset["meas1"]), - mean_absolute_error(expected_res["meas2"], dataset["meas2"]), - ] - ), - rtol=0.01, - ) - - def test_custom_indicators(self, ommodel): - mf = ModelicaFunction( - om_model=ommodel, - param_list=PARAMETERS, - indicators=["res1.showNumber", "res2.showNumber", "custom_indicator"], - custom_ind_dict={ - "custom_indicator": { - "depends_on": ["res1.showNumber", "res2.showNumber"], - "function": lambda x, y: x + y, - } - }, - ) - - res = mf.function(X_DICT) - - # Test custom indicator - np.testing.assert_allclose( - res["custom_indicator"], - expected_res["meas1"] + expected_res["meas2"], - rtol=0.01, - ) - - def test_function_no_indicators(self, ommodel): - mf = ModelicaFunction( - om_model=ommodel, - param_list=PARAMETERS, - agg_methods_dict=None, - indicators=None, - reference_df=None, - reference_dict=None, - ) - - res = 
mf.function(X_DICT) - - np.testing.assert_allclose( - np.array([res["res1.showNumber"], res["res2.showNumber"]]), - np.array([np.mean(expected_res["meas1"]), np.mean(expected_res["meas2"])]), - rtol=0.01, - ) - - def test_warning_error(self, ommodel): - # reference_df is not provided - with pytest.raises(ValueError): - ModelicaFunction( - om_model=ommodel, - param_list=PARAMETERS, - reference_df=None, - reference_dict=dataset, - ) - - # reference_dict is not provided - with pytest.raises(ValueError): - ModelicaFunction( - om_model=ommodel, - param_list=PARAMETERS, - reference_df=dataset, - reference_dict=None, - ) diff --git a/tests/test_simulate.py b/tests/test_simulate.py index 0bd5a96..3221714 100644 --- a/tests/test_simulate.py +++ b/tests/test_simulate.py @@ -2,7 +2,6 @@ import pytest -import numpy as np import pandas as pd from modelitool.simulate import OMModel, library_contents, load_library @@ -12,42 +11,40 @@ @pytest.fixture(scope="session") def simul(tmp_path_factory): - simulation_options = { - "startTime": 0, - "stopTime": 2, - "stepSize": 1, - "tolerance": 1e-06, - "solver": "dassl", - "outputFormat": "csv", - } - - outputs = ["res.showNumber"] - test_run_path = tmp_path_factory.mktemp("run") simu = OMModel( model_path="TestLib.rosen", package_path=PACKAGE_DIR / "package.mo", - simulation_options=simulation_options, - output_list=outputs, - simulation_path=test_run_path, + output_list=["res.showNumber"], + simulation_dir=test_run_path, lmodel=["Modelica"], ) return simu class TestSimulator: + def test_get_property_values(self, simul): + values = simul.get_property_values(["x.k", "y.k"]) + assert isinstance(values, list) + assert len(values) == 2 + assert values[0], values[1] == ["2.0"] + + # Comment while ompython version < 4+ + # with pytest.raises(KeyError): + # simul.get_property_values("nonexistent.param") + def test_set_param_dict(self, simul): test_dict = { "x.k": 2.0, "y.k": 2.0, } - simul.set_param_dict(test_dict) + simul.set_property_dict(test_dict) for key in test_dict.keys(): assert float(test_dict[key]) == float(simul.model.getParameters()[key]) - assert simul.get_parameters() == { + assert simul.get_property_dict() == { "x.k": "2.0", "x.y": None, "y.k": "2.0", @@ -57,15 +54,39 @@ def test_set_param_dict(self, simul): } def test_simulate_get_results(self, simul): - assert simul.get_available_outputs() == [ - "time", - "res.numberPort", - "res.showNumber", - ] - res = simul.simulate() - ref = pd.DataFrame({"res.showNumber": [401.0, 401.0, 401.0]}) + simulation_options = { + "startTime": 0, + "stopTime": 2, + "stepSize": 1, + "tolerance": 1e-06, + "solver": "dassl", + "outputFormat": "csv", + } + + res = simul.simulate(simulation_options=simulation_options) + ref = pd.DataFrame({"res.showNumber": [401, 401, 401]}) assert ref.equals(res) + res_dt = simul.simulate( + simulation_options={ + "startTime": pd.Timestamp("2009-01-01 00:00:00", tz="UTC"), + "stopTime": pd.Timestamp("2009-01-01 00:00:02", tz="UTC"), + "stepSize": pd.Timedelta("1s"), + "tolerance": 1e-06, + "solver": "dassl", + "outputFormat": "mat", + } + ) + + ref = pd.DataFrame( + {"res.showNumber": [401.0, 401.0, 401.0]}, + pd.date_range( + "2009-01-01 00:00:00", freq="s", periods=3, tz="UTC", name="time" + ), + ) + + pd.testing.assert_frame_equal(res_dt, ref) + def test_load_and_print_library(self, simul, capfd): libpath = PACKAGE_DIR try: @@ -79,7 +100,7 @@ def test_load_and_print_library(self, simul, capfd): assert "package.mo" in out def test_get_parameters(self, simul): - param = 
simul.get_parameters() + param = simul.get_property_dict() expected_param = { "res.significantDigits": "2", "res.use_numberPort": "true", @@ -90,78 +111,34 @@ def test_get_parameters(self, simul): } assert param == expected_param - def test_set_boundaries_df(self): - simulation_options = { - "startTime": 16675200, - "stopTime": 16682400, - "stepSize": 1 * 3600, - "tolerance": 1e-06, - "solver": "dassl", - "outputFormat": "mat", - } - - x_options = pd.DataFrame( - {"Boundaries.y[1]": [10, 20, 30], "Boundaries.y[2]": [3, 4, 5]}, - index=pd.date_range("2009-07-13 00:00:00", periods=3, freq="h"), - ) - x_direct = pd.DataFrame( - {"Boundaries.y[1]": [100, 200, 300], "Boundaries.y[2]": [30, 40, 50]}, - index=pd.date_range("2009-07-13 00:00:00", periods=3, freq="h"), - ) - - simu = OMModel( - model_path="TestLib.boundary_test", - package_path=PACKAGE_DIR / "package.mo", - lmodel=["Modelica"], - ) - - simulation_options_with_x = simulation_options.copy() - simulation_options_with_x["x"] = x_options - res1 = simu.simulate(simulation_options=simulation_options_with_x) - res1 = res1.loc[:, ["Boundaries.y[1]", "Boundaries.y[2]"]] - np.testing.assert_allclose(x_options.to_numpy(), res1.to_numpy()) - assert np.all( - [x_options.index[i] == res1.index[i] for i in range(len(x_options.index))] - ) - assert np.all( - [ - x_options.columns[i] == res1.columns[i] - for i in range(len(x_options.columns)) - ] - ) - - simu = OMModel( - model_path="TestLib.boundary_test", - package_path=PACKAGE_DIR / "package.mo", - lmodel=["Modelica"], - ) - res2 = simu.simulate(simulation_options=simulation_options, x=x_direct) - res2 = res2.loc[:, ["Boundaries.y[1]", "Boundaries.y[2]"]] - np.testing.assert_allclose(x_direct.to_numpy(), res2.to_numpy()) - assert np.all( - [x_direct.index[i] == res2.index[i] for i in range(len(x_direct.index))] - ) - assert np.all( - [ - x_direct.columns[i] == res2.columns[i] - for i in range(len(x_direct.columns)) - ] - ) - - simu = OMModel( - model_path="TestLib.boundary_test", - package_path=PACKAGE_DIR / "package.mo", - lmodel=["Modelica"], - ) - with pytest.warns( - UserWarning, - match="Boundary file 'x' specified both in simulation_options and as a " - "direct parameter", - ): - res3 = simu.simulate( - simulation_options=simulation_options_with_x, x=x_direct - ) - res3 = res3.loc[:, ["Boundaries.y[1]", "Boundaries.y[2]"]] - np.testing.assert_allclose(x_direct.to_numpy(), res3.to_numpy()) - with pytest.raises(AssertionError): - np.testing.assert_allclose(x_options.to_numpy(), res3.to_numpy()) + # BROKE UNTIL OMPYTHON DOES SOMETHING + # https://github.com/OpenModelica/OMPython/pull/400 + # https://github.com/OpenModelica/OMPython/pull/399 + # def test_set_boundaries_df(self): + # boundaries_seconds = pd.DataFrame( + # {"x1": [10, 20, 30], "x2": [3, 4, 5]}, + # index=[16675200, 16678800, 16682400], + # ) + # + # simulation_options = { + # "startTime": 16675200, + # "stopTime": 16682400, + # "stepSize": 3600, + # "tolerance": 1e-06, + # "solver": "dassl", + # "boundary": boundaries_seconds + # } + # + # simu = OMModel( + # model_path="TestLib.boundary_test", + # package_path=PACKAGE_DIR / "package.mo", + # lmodel=["Modelica"], + # boundary_table_name="Boundaries" + # ) + # + # res = simu.simulate(simulation_options=simulation_options) + # + # x_direct = pd.DataFrame( + # {"Boundaries.y[1]": [100, 200, 300], "Boundaries.y[2]": [30, 40, 50]}, + # index=pd.date_range("2009-07-13 00:00:00", periods=3, freq="h"), + # ) diff --git a/tutorials/Modelica models Handling.ipynb b/tutorials/Modelica 
models Handling.ipynb index 9435763..738ba73 100644 --- a/tutorials/Modelica models Handling.ipynb +++ b/tutorials/Modelica models Handling.ipynb @@ -1,38 +1,35 @@ { "cells": [ { - "cell_type": "code", - "execution_count": null, - "id": "b28b6845", "metadata": {}, - "outputs": [], + "cell_type": "code", "source": [ "import pandas as pd\n", "import os\n", "from pathlib import Path" - ] + ], + "id": "c2f9206d4bfaf2a8", + "outputs": [], + "execution_count": null }, { - "cell_type": "markdown", - "id": "77e18887-a12e-4198-a72b-dcc420528518", "metadata": {}, + "cell_type": "markdown", "source": [ "# Tutorial for handling Modelica models \n", "The aim of this tutorial is to show how to generate boundary files (.txt) for Modelica models, to load Modelica models on python, set up and launch simulations using **Modelitool**." - ] + ], + "id": "ca00557081abc510" }, { - "cell_type": "markdown", - "id": "13ff37e6-d666-4c81-96d0-7913eeead9d4", "metadata": {}, - "source": [ - "# 1. Proposed model " - ] + "cell_type": "markdown", + "source": "# 1. Proposed model ", + "id": "146511609643d892" }, { - "cell_type": "markdown", - "id": "4b6c113a-ccdb-4fb5-a81d-fa75473028c7", "metadata": {}, + "cell_type": "markdown", "source": [ "In this tutorial, we create of model of following wall, tested a \"real scale\" bench. The Nobatek BEF (Banc d'Essais Façade) provides experimental cells to test building façade solutions. The heat exchanges in a cell are limited on 5 of its faces. The 6th face is dedicated to the tested solution. Internal temperature and hydrometry conditions can be controlled or monitored. External conditions are measured (temperatures and solar radiation). we propose a resistance/capacity approach.\n", "\n", @@ -68,113 +65,38 @@ "\n", "\n", "Initial conditions for the layers temperatures are taken from the measured data." - ] - }, - { - "cell_type": "markdown", - "id": "94f78fac-8238-4755-a876-3b7b63a8c323", - "metadata": {}, - "source": [ - "# 2. Set boundary file\n", - "## Option A: load csv file\n", - "Let's load measurement data on python. We can use this dataframe to define boundary conditions of our model." - ] + ], + "id": "5268ae091f4388cc" }, { - "cell_type": "code", - "execution_count": null, - "id": "71e65ff5-8023-4cd5-884d-c0c1c4118235", - "metadata": {}, - "outputs": [], - "source": [ - "TUTORIAL_DIR = Path(os.getcwd()).as_posix()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fae39639-7b9d-42c7-ae6f-d403b570dd0b", "metadata": {}, - "outputs": [], - "source": [ - "reference_df = pd.read_csv(\n", - " Path(TUTORIAL_DIR) / \"resources/study_df.csv\",\n", - " index_col=0,\n", - " parse_dates=True\n", - ") " - ] - }, - { "cell_type": "markdown", - "id": "16fac796-7386-4688-8066-8f559d36effe", - "metadata": {}, - "source": [ - "## Option B: Create boundary file for Modelica model\n", - "Or, before loading the Modelica model (*.mo), one might want to generate boundary files with the right format (.txt) to use it their model. For this, you can use combitabconvert from modelitool.\n", - "\n", - "Make sure beforehand your data is clean: no NAs, non monotonically increasing index, abberant values, etc.\n", - "\n", - "**_Note : Note that you have to manually configure the file path in\n", - "the combiTimetable of your modelica model_**" - ] + "source": "# 2. 
Load model from Modelica", + "id": "be76638ef27a38d2" }, { - "cell_type": "code", - "execution_count": null, - "id": "37735475-89a1-4bc2-a8ae-1af8ca73cf45", "metadata": {}, - "outputs": [], - "source": [ - "from modelitool.combitabconvert import df_to_combitimetable" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f2de6868-8016-4fe4-a191-8c4325095fbd", - "metadata": {}, - "outputs": [], - "source": [ - "df_to_combitimetable(\n", - " df=reference_df.loc[\"2018-03-22\":\"2018-03-23\"],\n", - " filename=\"resources/boundary_temp.txt\"\n", - ")" - ] - }, - { "cell_type": "markdown", - "id": "844bd7c8-6bcb-4b3e-ab27-602b016da2fc", - "metadata": {}, - "source": [ - "# 3. Load model from Modelica" - ] + "source": "To avoid loading all ouptuts from modelica model, let's first define a list of outputs that will be included in the dataframe output for any simulation.", + "id": "f3c2b6a0c1cd7f97" }, { - "cell_type": "markdown", - "id": "0ea8c4b4-2eab-429c-a67d-743aaa47a5bd", "metadata": {}, - "source": [ - "To avoid loading all ouptut from modelica model, let's first define a list of output that will be included in the dataframe output for any simulation." - ] - }, - { "cell_type": "code", - "execution_count": null, - "id": "64149508-369a-4a8c-8928-6c71090b4428", - "metadata": {}, - "outputs": [], "source": [ "output_list = [\n", " \"T_coat_ins.T\",\n", " \"T_ins_ins.T\",\n", " \"Tw_out.T\"\n", "]" - ] + ], + "id": "77591ad834ae9cf9", + "outputs": [], + "execution_count": null }, { - "cell_type": "markdown", - "id": "b092bb4236cc85f3", "metadata": {}, + "cell_type": "markdown", "source": [ "Now, we can load the *om file.\n", "\n", @@ -189,39 +111,38 @@ " - `tolerance`: Numerical tolerance for the solver\n", " - `solver`: Solver to use (e.g. \"dassl\")\n", " - `outputFormat`: \"mat\" or \"csv\" for results format\n", - " - `x`: Boundary conditions as a DataFrame (optional)\n", "- `output_list` (optional): List of variables to include in simulation results\n", "- `lmodel` (optional): List of required Modelica libraries (e.g. [\"Modelica\"])" - ] + ], + "id": "a63c4043198334b1" }, { - "cell_type": "code", - "execution_count": null, - "id": "3264057e-66ef-41c6-b75a-6efd28748f8c", "metadata": {}, + "cell_type": "code", + "source": "from modelitool.simulate import OMModel", + "id": "480baab689c43bd6", "outputs": [], - "source": [ - "from modelitool.simulate import OMModel" - ] + "execution_count": null }, { - "cell_type": "code", - "execution_count": null, - "id": "8d9bfb90-3f07-49e9-9d7f-314ec3a07fc1", "metadata": {}, - "outputs": [], + "cell_type": "code", "source": [ + "TUTORIAL_DIR = Path(os.getcwd()).as_posix()\n", + "\n", "simu_OM = OMModel(\n", " model_path=Path(TUTORIAL_DIR) / \"resources/etics_v0.mo\",\n", " output_list=output_list,\n", " lmodel=[\"Modelica\"],\n", ")" - ] + ], + "id": "f00ab515289e7a00", + "outputs": [], + "execution_count": null }, { - "cell_type": "markdown", - "id": "766241a0-95b8-4916-9206-1ca240b2f361", "metadata": {}, + "cell_type": "markdown", "source": [ "#### Set up simulation options \n", "\n", @@ -230,51 +151,44 @@ "In Modelica, startTime and stopTime correspond to the number\n", "of seconds since the beginning of the year. \n", "\n", - "The values can be found in the file created earlier using df_to_combitimetable . 
Another way is to use the index of the DataFrame we just created.\n", - "The modelitool function modelitool.combitabconvert.datetime_to_seconds\n", - "helps you convert datetime index in seconds.\n" - ] + "The values can be generated using the modelitool function modelitool.combitabconvert.datetime_to_seconds.\n" + ], + "id": "7dbbb56f26d95f62" }, { - "cell_type": "code", - "execution_count": null, - "id": "b26a8f6e-2f1a-41ed-a74e-dc9a41435110", "metadata": {}, + "cell_type": "code", + "source": "from modelitool.combitabconvert import datetime_to_seconds", + "id": "32529ae64f5d22b9", "outputs": [], - "source": [ - "from modelitool.combitabconvert import datetime_to_seconds" - ] + "execution_count": null }, { - "cell_type": "code", - "execution_count": null, - "id": "a7472557-a5af-49bf-8ffc-08f30741e4c9", "metadata": {}, - "outputs": [], + "cell_type": "code", "source": [ - "simulation_df = reference_df.loc[\"2018-03-22\":\"2018-03-23\"]\n", - "second_index = datetime_to_seconds(simulation_df.index)" - ] + "simulation_range= pd.date_range(\"2018-03-22\", \"2018-03-23\", freq=\"1h\")\n", + "second_index = datetime_to_seconds(simulation_range)" + ], + "id": "a8ba2b021e132ace", + "outputs": [], + "execution_count": null }, { - "cell_type": "markdown", - "id": "9d771fd7-bdde-4b90-9d3e-699d3f488099", "metadata": {}, + "cell_type": "markdown", "source": [ "- stepSize is the simulation timestep size. In this case it's 5 min or\n", "300 sec.\n", - "- tolerance and solver are related to solver configuration\n", + "- tolerance and solver are related to solver configuration -\n", "do not change if you don't need to.\n", - "- outputFormat can be either csv or mat. csv will enable faster data handling during sensitivity analyses and optimizations.\n", - "- x: as the boundary conditions. If not given here, it can still be provided in method `simulate`." - ] + "- outputFormat can be either csv or mat. csv will enable faster data handling during sensitivity analyses and optimizations." + ], + "id": "cfd6ba4c8b3900c7" }, { - "cell_type": "code", - "execution_count": null, - "id": "604aa9ed-b37b-4e61-b96e-a6dfdad42ca7", "metadata": {}, - "outputs": [], + "cell_type": "code", "source": [ "simulation_opt = {\n", " \"startTime\": second_index[0],\n", @@ -282,98 +196,132 @@ " \"stepSize\": 300,\n", " \"tolerance\": 1e-06,\n", " \"solver\": \"dassl\",\n", - " \"outputFormat\": \"csv\"\n", + " \"outputFormat\": \"csv\",\n", + " \"time_index\": \"datetime\"\n", "}" - ] + ], + "id": "f0219a475c23b35", + "outputs": [], + "execution_count": null }, { - "cell_type": "markdown", - "id": "37bab369-bbcf-40ff-ba5f-fc2d78a3de32", "metadata": {}, - "source": [ - "# 4. Run the simulation" - ] + "cell_type": "markdown", + "source": "# 4. Running a simulation", + "id": "890a69283615199" }, { - "cell_type": "markdown", - "id": "02e59be3-e4f4-44f0-adcd-66a43d200146", "metadata": {}, + "cell_type": "markdown", "source": [ - "Set the initial and parameter values in a dictionary. They can either be set before simluation (with `set_param_dict()` method, or when using method `simulate()`. Each change of paramter value overwrite the previous one. 
" - ] + "To run a simulation, use the `simulate()` method.\n", + "\n", + "- `property_dict` (optionnal) : dictionary of model parameters to override before the run.\n", + "- `simulation_options` (optionnal if they were not specified when the model was instantiated): standard OpenModelica options such as `\"startTime\"`, `\"stopTime\"`, `\"stepSize\"`, `\"tolerance\"`, `\"solver\"`, `\"outputFormat\"`.\n", + "- `simflags` *(str)*: additional OpenModelica simulation flags (⚠️ except `override`)\n", + "\n", + "The output is a `pandas.DataFrame` with the simulation results." + ], + "id": "2e8eb80c0ca29d02" }, { - "cell_type": "code", - "execution_count": null, - "id": "c43d997c-5cac-4149-97a1-f849dbac0d4c", "metadata": {}, + "cell_type": "code", + "source": "simu_OM.simulate()", + "id": "be1ab9bbb6b4c64d", "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "The initial values and parameter values can either be set before simulation (with the `set_param_dict()` method), or when using `simulate()`. Each change of parameter value overwrite the previous one.", + "id": "45e5b53da6acc683" + }, + { + "metadata": {}, + "cell_type": "code", "source": [ "parameter_dict_OM = {\n", " \"Twall_init\": 24.81 + 273.15,\n", " \"Tins1_init\": 19.70 + 273.15,\n", " \"Tins2_init\": 10.56 + 273.15,\n", " \"Tcoat_init\": 6.4 + 273.15,\n", - " 'Lambda_ins.k': 0.04,\n", - "}" - ] + " 'Lambda_ins.k': 0.454,\n", + "}\n", + "\n", + "simu_OM.set_param_dict(parameter_dict_OM)" + ], + "id": "3140851e7d7901d0", + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "cell_type": "code", + "source": "simu_OM.get_parameters()", + "id": "da0c0a5f64a2144a", + "outputs": [], + "execution_count": null }, { + "metadata": {}, "cell_type": "markdown", - "id": "65fd55a9-959f-4bef-9ee2-14d0c617b75b", + "source": "The new set values of parameters in the model can be checked using `get_property_values()`:", + "id": "7f712ee0e94341bb" + }, + { + "metadata": {}, + "cell_type": "code", + "source": "simu_OM.get_property_values(parameter_dict_OM)", + "id": "7d3d8bec06aacfda", + "outputs": [], + "execution_count": null + }, + { "metadata": {}, + "cell_type": "markdown", "source": [ - "Simulation flags can also be specified in simulate() method. Overview of possible simulation flags can be found here: https://openmodelica.org/doc/OpenModelicaUsersGuide/latest/simulationflags.html. Note that the simulation flag override cannot be used, as it was already used in class OMModel with simulation_options.\n", - "\n", - "If x boundary conditions do not\n", - " have a DateTime index (seconds int), a year can be specified to convert\n", - " int seconds index to a datetime index. If simulation spans overs several\n", - " years, it shall be the year when it begins.\n", - "\n", - "The output of the `simulate()` method is a dataframe, containing the outputs listed in output_list." - ] + "Additional options can be specified in the `simulate()` method. The output is a Pandas DataFrame containing the results.\n", + "If an `output_list` was provided when creating the model, only those variables are included." 
+ ], + "id": "b095eaccd571da58" }, { - "cell_type": "code", - "execution_count": null, - "id": "d52fdda8-4115-4a13-a0c3-b05459a0f807", "metadata": {}, - "outputs": [], + "cell_type": "code", "source": [ "init_res_OM = simu_OM.simulate(\n", - " simflags = \"-initialStepSize=60 -maxStepSize=3600 -w -lv=LOG_STATS\",\n", - " parameter_dict=parameter_dict_OM,\n", - " x=reference_df,\n", - " year=2024,\n", + " simulation_options=simulation_opt,\n", + " property_dict=parameter_dict_OM,\n", ")\n", "init_res_OM.head()" - ] + ], + "id": "21e8d442bdce1a75", + "outputs": [], + "execution_count": null }, { - "cell_type": "markdown", - "id": "6b20f490-36ec-4da1-9f80-111443ae4a1f", "metadata": {}, - "source": [ - "Plotted results" - ] + "cell_type": "markdown", + "source": "Plotted results", + "id": "6fd557f394da6246" }, { - "cell_type": "code", - "execution_count": null, - "id": "e34b7144-b823-4796-8fa6-2f01f8bf2d52", "metadata": {}, + "cell_type": "code", + "source": "init_res_OM.plot()", + "id": "e236a9338e6baab2", "outputs": [], - "source": [ - "init_res_OM.plot()" - ] + "execution_count": null }, { - "cell_type": "code", - "execution_count": null, - "id": "c0b58fca-65de-462d-b6a5-fd34707db05b", "metadata": {}, + "cell_type": "code", + "source": "", + "id": "a792746dfd05dd9c", "outputs": [], - "source": [] + "execution_count": null } ], "metadata": {
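Relatedly, a minimal sketch of how the renamed `write_combitt_from_df` helper from modelitool/combitabconvert.py might be used to generate a CombiTimeTable boundary file (column names and the target path are placeholders, not part of this change):

from pathlib import Path

import pandas as pd

from modelitool.combitabconvert import write_combitt_from_df

# Placeholder boundary data; any DataFrame with a monotonically increasing
# DatetimeIndex (or a numeric seconds index) is accepted.
boundary = pd.DataFrame(
    {"T_ext": [275.15, 276.15, 277.15], "solar_rad": [0.0, 50.0, 120.0]},
    index=pd.date_range("2009-01-01", periods=3, freq="h"),
)

# Writes a text file starting with "#1" and a "double table1(rows, cols + 1)"
# header line, followed by tab-separated values, as expected by Modelica
# CombiTimeTables; a DatetimeIndex is converted to seconds before writing.
write_combitt_from_df(boundary, Path("boundaries.txt"))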