diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 6aa6a79..9b67531 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -15,11 +15,12 @@ jobs: make - name: install dependencies run: | - pip install pytest pytest-cov pytest-mock + pip install pytest pytest-cov pytest-mock pytest-xdist pip install numpy - - name: result tests + - name: result simulation system tests run: | - pytest Tests/SystemTests + # We're providing the path to the EXE based on the prior build solver step. + pytest Tests/SystemTests -n auto --relativeExePath="build_skyline/bin/OneDSolver" - name: result unit tests run: | cd build_skyline diff --git a/.gitignore b/.gitignore index c28e83e..e0a89cb 100644 --- a/.gitignore +++ b/.gitignore @@ -41,3 +41,6 @@ build # Vscode .vscode/ + +# Python +venv/ diff --git a/CMakeLists.txt b/CMakeLists.txt index 339d4f2..07377d6 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,5 +1,5 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.11) +CMAKE_MINIMUM_REQUIRED(VERSION 3.18) # COMMON SETTINGS PROJECT(OneDSolver) @@ -71,6 +71,17 @@ ELSEIF(sparseSolverType STREQUAL "csparse") "${CMAKE_CURRENT_SOURCE_DIR}/Code/Source/sparse/csparse/*.h") ENDIF() +# NLOHMANN JSON FOR SERIALIZATION +include(FetchContent) +FetchContent_Declare( + nlohmann_json + GIT_REPOSITORY https://github.com/nlohmann/json.git + GIT_TAG v3.11.2 # You can specify a version or use the latest tag +) +FetchContent_MakeAvailable(nlohmann_json) +# Global include for nlohmann headers +include_directories(${nlohmann_json_SOURCE_DIR}/single_include) + # COPY DATASET FOLDER # FILE(COPY "${CMAKE_CURRENT_SOURCE_DIR}/tests" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}") @@ -111,16 +122,6 @@ if (BUILD_SV_INSTALLER) add_subdirectory("${CMAKE_SOURCE_DIR}/Distribution") ENDIF() -# NLOHMANN JSON FOR SERIALIZATION -include(FetchContent) -FetchContent_Declare( - nlohmann_json - GIT_REPOSITORY https://github.com/nlohmann/json.git - GIT_TAG v3.11.2 # You can specify a version or use 
the latest tag -) -FetchContent_MakeAvailable(nlohmann_json) -target_include_directories(${PROJECT_NAME} PUBLIC ${nlohmann_json_SOURCE_DIR}/single_include) - # Unit tests and Google Test if(ENABLE_UNIT_TEST) @@ -174,13 +175,6 @@ if(ENABLE_UNIT_TEST) # handle both use cases. target_link_libraries(UnitTestsExecutable gtest gtest_main pthread) - # We need to include Nlohmann when compiling the unit test executable - # A better strategy would be to build up a list of what we're going to - # include and reuse it. - # Another option: we could simply build the library of cvOneD functionality - # and link against that both here and in the main executable. - target_include_directories(UnitTestsExecutable PRIVATE ${nlohmann_json_SOURCE_DIR}/single_include) - # It's likely we'll need to include additional directories for some # versions of the unit tests. That can be updated when/if we update # the general strategy. diff --git a/Code/Source/cvOneDModelManager.cxx b/Code/Source/cvOneDModelManager.cxx index 9d3887d..e495d69 100644 --- a/Code/Source/cvOneDModelManager.cxx +++ b/Code/Source/cvOneDModelManager.cxx @@ -188,7 +188,7 @@ int cvOneDModelManager::CreateNode(char * nodeName,double x,double y,double z){ } -int cvOneDModelManager::CreateJoint(char * jointName,double x,double y,double z, +int cvOneDModelManager::CreateJoint(const char * jointName,double x,double y,double z, int numInSegs,int numOutSegs, int *InSegs,int *OutSegs){ diff --git a/Code/Source/cvOneDModelManager.h b/Code/Source/cvOneDModelManager.h index 152dc2a..18c5f11 100644 --- a/Code/Source/cvOneDModelManager.h +++ b/Code/Source/cvOneDModelManager.h @@ -69,7 +69,7 @@ class cvOneDModelManager{ // CREATE JOINT - int CreateJoint(char* jointName,double x,double y,double z, + int CreateJoint(const char* jointName,double x,double y,double z, int numInSegs,int numOutSegs, int *InSegs,int *OutSegs); diff --git a/Code/Source/cvOneDOptions.h b/Code/Source/cvOneDOptions.h index 54313c9..b1a82c9 100644 --- 
a/Code/Source/cvOneDOptions.h +++ b/Code/Source/cvOneDOptions.h @@ -35,6 +35,7 @@ #include #include #include +#include #include "cvOneDTypes.h" #include "cvOneDException.h" @@ -49,21 +50,14 @@ struct options{ // NODE DATA cvStringVec nodeName; - // Note that these coordinates are also used - // for the joints through an implicit mapping cvDoubleVec nodeXcoord; cvDoubleVec nodeYcoord; cvDoubleVec nodeZcoord; // JOINT DATA cvStringVec jointName; - - // "jointNode" isn't used currently -- but we want - // to start integrating it. It can be used to make - // the mapping from joint->node explicit. Right - // now, the mapping is implicit: each joint is - // associated with the node of the same index in - // the list of nodes. + // jointNode: name of the associated node that we will use + // to get the XYZ coordinates when creating the joint. cvStringVec jointNode; cvStringVec jointInletName; @@ -121,7 +115,12 @@ struct options{ double convergenceTolerance; int useIV; int useStab; - string outputType; + + // These are to preserve legacy behavior and are + // expected to be eventually migrated into a + // post-processing step. + string outputType = "TEXT"; + std::optional vtkOutputType = std::nullopt; }; void validateOptions(options const& opts); diff --git a/Code/Source/cvOneDOptionsJsonParser.cxx b/Code/Source/cvOneDOptionsJsonParser.cxx index a165af8..0f70247 100644 --- a/Code/Source/cvOneDOptionsJsonParser.cxx +++ b/Code/Source/cvOneDOptionsJsonParser.cxx @@ -206,8 +206,13 @@ void parseSolverOptions(const nlohmann::ordered_json& jsonData, options& opts) t opts.useIV = solverOptions.at("useIV").get(); opts.useStab = solverOptions.at("useStab").get(); - // Doesn't seem like this is used...but we'll provide a default anyway - opts.outputType = solverOptions.value("outputType", "None"); + // Until this is migrated elsewhere, we'll (optionally) store the solver options. 
+ if(solverOptions.contains("outputType")){ + opts.outputType = solverOptions.at("outputType").get(); + } + if(solverOptions.contains("vtkOutputType")){ + opts.vtkOutputType = solverOptions.at("vtkOutputType").get(); + } } catch (const std::exception& e) { throw std::runtime_error("Error parsing 'solverOptions': " + std::string(e.what())); diff --git a/Code/Source/cvOneDOptionsJsonSerializer.cxx b/Code/Source/cvOneDOptionsJsonSerializer.cxx index be9bf29..34b9737 100644 --- a/Code/Source/cvOneDOptionsJsonSerializer.cxx +++ b/Code/Source/cvOneDOptionsJsonSerializer.cxx @@ -81,29 +81,22 @@ nlohmann::ordered_json serialize_joint_data(const options& opts) { nlohmann::ordered_json j = nlohmann::ordered_json::array(); // Check consistency of the joint inlet and outlet vectors - check_consistent_size("joints", opts.jointName, + check_consistent_size("joints", opts.jointName, opts.jointNode, opts.jointInletListNames, opts.jointInletListNumber, opts.jointOutletListNames, opts.jointOutletListNumber, opts.jointInletList, opts.jointOutletList); size_t jointCount = opts.jointName.size(); - // There must be enough nodes for us to make associations. This is - // an implicit assumption made in the current joint creation code. - if( opts.nodeName.size() < jointCount ){ - throw std::runtime_error("The implicit joint-node association requires that there be at least as many nodes as there are joints."); - } - for (size_t i = 0; i < jointCount; ++i) { nlohmann::ordered_json joint; // Add JOINT data - // We're adding the node based on the index so we - // can start incorporating the explicit association - // from the implicit one currently used in the code. + // Now that we're actually populating the joint node associations + // when deserializing the legacy format, we can utilize them here. 
joint["joint"] = { - {"id", "J" + std::to_string(i + 1)}, - {"associatedNode", opts.nodeName.at(i)}, + {"id", opts.jointName.at(i)}, + {"associatedNode", opts.jointNode.at(i)}, {"inletName", opts.jointInletListNames.at(i)}, {"outletName", opts.jointOutletListNames.at(i)} }; @@ -225,7 +218,13 @@ nlohmann::ordered_json serializeSolverOptions(options const& opts) { solverOptions["convergenceTolerance"] = opts.convergenceTolerance; solverOptions["useIV"] = opts.useIV; solverOptions["useStab"] = opts.useStab; + + // For now, we're serializing the output data. + // In the future, we'll want to migrate these. solverOptions["outputType"] = opts.outputType; + if(opts.vtkOutputType){ + solverOptions["vtkOutputType"] = *opts.vtkOutputType; + } return solverOptions; } diff --git a/Code/Source/cvOneDOptionsLegacySerializer.cxx b/Code/Source/cvOneDOptionsLegacySerializer.cxx index f938995..594c53d 100644 --- a/Code/Source/cvOneDOptionsLegacySerializer.cxx +++ b/Code/Source/cvOneDOptionsLegacySerializer.cxx @@ -36,7 +36,6 @@ #include #include "cvOneDUtility.h" -#include "cvOneDGlobal.h" namespace cvOneD{ @@ -126,10 +125,25 @@ void readOptionsLegacyFormat(string inputFile, options* opts){ try{ // Get Joint Name opts->jointName.push_back(tokenizedString[1]); -// opts->jointNode.push_back(atof(tokenizedString[2].c_str())); - opts->jointNode.push_back(tokenizedString[2]); + + // The legacy input files included a "joint node", but it is + // never utilized in simulation. The legacy behavior was, instead + // to simply associate each joint with the sequentially listed + // nodeXcoord, nodeYcoord, and nodeZcoord. + // + // To preserve this legacy behavior, we're not going to deserialize + // the jointNode. Instead, we're going to replace "jointNode" with + // the sequential node item name. That's **wrong** -- we should be + // using "jointNode"! + // But! 
it preserves the legacy behavior of the existing legacy + // options files when we refactor the actual utilization of the + // options to use the jointNode. + // + // The actual population of the jointNode option is done in a + // post-processing step after we finish processing the file. opts->jointInletName.push_back(tokenizedString[3]); opts->jointOutletName.push_back(tokenizedString[4]); + }catch(...){ throw cvException(string("ERROR: Invalid JOINT Format. Line " + to_string(lineCount) + "\n").c_str()); } @@ -258,22 +272,13 @@ void readOptionsLegacyFormat(string inputFile, options* opts){ }else if(tokenizedString.size() < 2){ throw cvException(string("ERROR: Not enough parameters for OUTPUT token. Line " + to_string(lineCount) + "\n").c_str()); } - // Output Type - if(upper_string(tokenizedString[1]) == "TEXT"){ - cvOneDGlobal::outputType = OutputTypeScope::OUTPUT_TEXT; - }else if(upper_string(tokenizedString[1]) == "VTK"){ - cvOneDGlobal::outputType = OutputTypeScope::OUTPUT_VTK; - }else if(upper_string(tokenizedString[1]) == "BOTH"){ - cvOneDGlobal::outputType = OutputTypeScope::OUTPUT_BOTH; - }else{ - throw cvException("ERROR: Invalid OUTPUT Type.\n"); - } + + // Collect and store the output type data + opts->outputType = tokenizedString[1]; if(tokenizedString.size() > 2){ - cvOneDGlobal::vtkOutputType = atoi(tokenizedString[2].c_str()); - if(cvOneDGlobal::vtkOutputType > 1){ - throw cvException("ERROR: Invalid OUTPUT VTK Type.\n"); - } + opts->vtkOutputType = atoi(tokenizedString[2].c_str()); } + }else if(upper_string(tokenizedString[0]) == std::string("DATATABLE")){ // printf("Found Data Table.\n"); try{ @@ -361,6 +366,17 @@ void readOptionsLegacyFormat(string inputFile, options* opts){ // Increment Line Count lineCount++; } + + // Post-process: to preserve legacy behavior, we're + // making each joint node the corresponding sequential + // node from the node array. See comment above for additional details. 
+ if(opts->jointName.size() > opts->nodeName.size()){ + throw cvException("ERROR: there must be at least as many nodes as there are joints.\n"); + } + for(size_t k = 0; k < opts->jointName.size(); ++k){ + opts->jointNode.push_back(opts->nodeName.at(k)); + } + // Close File infile.close(); } diff --git a/Code/Source/main.cxx b/Code/Source/main.cxx index 26e30f0..916b636 100644 --- a/Code/Source/main.cxx +++ b/Code/Source/main.cxx @@ -31,6 +31,7 @@ #include #include +#include #include "cvOneDGlobal.h" #include "cvOneDModelManager.h" @@ -75,6 +76,23 @@ int getDataTableIDFromStringKey(string key){ // =============================== // CREATE MODEL AND RUN SIMULATION // =============================== +namespace{ + +size_t findJointNodeIndexOrThrow(const auto& jointNodeName, const auto& nodeNames, const auto& jointName){ + // Return the index of "nodeNames" that corresponds to the "jointNodeName" + // or throw a context-specific error. + + auto const iter = std::find(nodeNames.begin(), nodeNames.end(), jointNodeName); + if (iter == nodeNames.end()) { + std::string const errMsg = "ERROR: The node '" + jointNodeName + "' required by joint '" + + jointName + "' was not found in the list of nodes."; + throw cvException(errMsg.c_str()); + } + return std::distance(nodeNames.begin(), iter); +} + +} // namespace + void createAndRunModel(const cvOneD::options& opts) { // MESSAGE @@ -142,9 +160,15 @@ void createAndRunModel(const cvOneD::options& opts) { asOutlets[loopB] = opts.jointOutletList[jointOutletID][loopB]; } } + + // Find the index of the indicated node. 
+ auto const jointName = opts.jointName.at(loopA); + auto const nodeIndex = findJointNodeIndexOrThrow( + opts.jointNode.at(loopA), opts.nodeName, jointName); + // Finally Create Joint - jointError = oned->CreateJoint((char*)opts.jointName[loopA].c_str(), - opts.nodeXcoord[loopA], opts.nodeYcoord[loopA], opts.nodeZcoord[loopA], + jointError = oned->CreateJoint(jointName.c_str(), + opts.nodeXcoord[nodeIndex], opts.nodeYcoord[nodeIndex], opts.nodeZcoord[nodeIndex], totJointInlets, totJointOutlets, asInlets, asOutlets); if(jointError == CV_ERROR) { throw cvException(string("ERROR: Error Creating JOINT " + to_string(loopA) + "\n").c_str()); @@ -307,6 +331,32 @@ void createAndRunModel(const cvOneD::options& opts) { // ============== // RUN ONEDSOLVER // ============== + +namespace { + +void setOutputGlobals(const cvOneD::options& opts){ + + if(upper_string(opts.outputType) == "TEXT"){ + cvOneDGlobal::outputType = OutputTypeScope::OUTPUT_TEXT; + }else if(upper_string(opts.outputType) == "VTK"){ + cvOneDGlobal::outputType = OutputTypeScope::OUTPUT_VTK; + }else if(upper_string(opts.outputType) == "BOTH"){ + cvOneDGlobal::outputType = OutputTypeScope::OUTPUT_BOTH; + }else{ + throw cvException("ERROR: Invalid OUTPUT Type.\n"); + } + + if(opts.vtkOutputType){ + cvOneDGlobal::vtkOutputType = *opts.vtkOutputType; + if(cvOneDGlobal::vtkOutputType > 1){ + throw cvException("ERROR: Invalid OUTPUT VTK Type.\n"); + } + } + +} + +} // namespace + void runOneDSolver(const cvOneD::options& opts){ // Model Checking @@ -321,12 +371,20 @@ void runOneDSolver(const cvOneD::options& opts){ string jsonFilename("echo.json"); cvOneD::writeJsonOptions(opts, jsonFilename); + // Per the existing behavior, we'll set output globals + // from the options. TODO: we should really just + // consume option data from the options rather + // than setting it in a global variable. Besides that, + // we should move the VTK options to a postprocessor + // rather than have them in the solver options. 
+ setOutputGlobals(opts); + // Create Model and Run Simulation createAndRunModel(opts); } -void convertOptions(const std::string& legacyFilename, const std::string& jsonFilename){ +void convertLegacyToJsonOptions(const std::string& legacyFilename, const std::string& jsonFilename){ // Convert legacy format to JSON and print it to file cvOneD::options opts{}; cvOneD::readOptionsLegacyFormat(legacyFilename,&opts); @@ -344,6 +402,14 @@ struct ArgOptions{ std::optional jsonConversionOutput = std::nullopt; }; +std::string removeQuotesIfPresent(const std::string& str) { + std::string result = str; + if (result.front() == '"' && result.back() == '"') { + result = result.substr(1, result.length() - 2); + } + return result; +} + ArgOptions parseInputArgs(int argc, char** argv) { // Right now, we don't check for bad arguments but we could // do that in here if we want more coherent behavior. @@ -353,13 +419,13 @@ ArgOptions parseInputArgs(int argc, char** argv) { std::string arg = argv[i]; if (arg == "-jsonInput" && i + 1 < argc) { - options.jsonInput = argv[i + 1]; + options.jsonInput = removeQuotesIfPresent(argv[i + 1]); i++; } if (arg == "-legacyToJson" && i + 2 < argc) { - options.legacyConversionInput = argv[i + 1]; - options.jsonConversionOutput = argv[i + 2]; + options.legacyConversionInput = removeQuotesIfPresent(argv[i + 1]); + options.jsonConversionOutput = removeQuotesIfPresent(argv[i + 2]); i++; } @@ -368,6 +434,12 @@ ArgOptions parseInputArgs(int argc, char** argv) { return options; } +cvOneD::options readLegacyOptions(std::string const& inputFile){ + cvOneD::options opts{}; + cvOneD::readOptionsLegacyFormat(inputFile,&opts); + return opts; +} + // Parse the incoming arguments and read the options file. // Also performs option file conversions if requested by user. 
// @@ -390,10 +462,8 @@ std::optional parseArgsAndHandleOptions(int argc, char** argv){ // Legacy behavior if(argc == 2){ - string inputFile(argv[1]); - cvOneD::options opts{}; - cvOneD::readOptionsLegacyFormat(inputFile,&opts); - return opts; + string inputFile{removeQuotesIfPresent(argv[1])}; + return readLegacyOptions(inputFile); } // Default behavior @@ -401,7 +471,7 @@ std::optional parseArgsAndHandleOptions(int argc, char** argv){ // Conversion if(argOptions.legacyConversionInput && argOptions.jsonConversionOutput){ - convertOptions(*argOptions.legacyConversionInput, + convertLegacyToJsonOptions(*argOptions.legacyConversionInput, *argOptions.jsonConversionOutput); } diff --git a/README.md b/README.md index ea56eed..9ba51ac 100755 --- a/README.md +++ b/README.md @@ -7,6 +7,81 @@ Some of the main contributors are I. Vignon-Clementel, N. Wilson, J. Wan, B. Ste The one-dimensional equations for the flow of a Newtonian, incompressible fluid in a deforming, elastic domain consist of the continuity equation, a single axial momentum balance equation, a constitutive equation, and suitable initial and boundary conditions. +## Usage +Usage assumes that you have built from the source as described below. + +### Basic usage +There are three supported usages for the generated executable. +* The quotations around the input args are optional +* Input file paths can be specified as a local file name or an absolute path + +#### 1. Run simulation using a legacy input file +Run the simulation with a legacy ".in" input file. +~~~ +svOneDSolver ".in" +~~~ + +#### 2. Convert legacy input file to JSON +Convert a legacy input file to a JSON input file +~~~ +svOneDSolver -legacyToJson ".in" ".json" +~~~ + +#### 3. Run simulation using a JSON input file +Run the simulation using a JSON input file +~~~ +svOneDSolver -jsonInput ".json" +~~~ + +### Manually run tests +System and unit tests can be run using ctest and pytest commands. 
+ +The following assume: +* A source folder "svOneDSolver/" and a neighboring build folder "svOneDSolver_build/" +* The source has been built using "cmake" and "make" commands (as described above) + +#### Unit tests +Navigate to the build folder and run the ctest command. + +~~~ +ctest +~~~ + +#### System tests +The system tests run simulations and verify some of the resulting output. + +You must have python and pytest installed. + +Navigate to the source folder, and run the following: + +~~~ +pytest Tests/SystemTests --relativeExePath="../svOneDSolver_build/bin/OneDSolver" +~~~ + +If the binary is in a different location, specify the correct location. + +
+Running system tests in parallel +
+ +This offers some details for how to set up a python environment and install the relevant packages on linux, and then run the tests in parallel. Note that depending on the OS this setup process will vary, but the basic steps will remain the same. + +Navigate to the source folder, and run the following: + +``` +sudo apt install python3-venv +python3 -m venv venv +source venv/bin/activate +pip install numpy pytest-xdist +``` + +With `xdist` available, run the tests in parallel (`-n `). + +``` +pytest Tests/SystemTests --relativeExePath="../svOneDSolver_build/bin/OneDSolver" -n auto +``` +
+ ## Installation ### Prerequisites diff --git a/Tests/SystemTests/.gitignore b/Tests/SystemTests/.gitignore new file mode 100644 index 0000000..ba0430d --- /dev/null +++ b/Tests/SystemTests/.gitignore @@ -0,0 +1 @@ +__pycache__/ \ No newline at end of file diff --git a/Tests/SystemTests/conftest.py b/Tests/SystemTests/conftest.py index 242abfb..75cb82e 100644 --- a/Tests/SystemTests/conftest.py +++ b/Tests/SystemTests/conftest.py @@ -8,3 +8,7 @@ def tempdir(): """Temporary directory for test purposes.""" with TemporaryDirectory() as tempdir: yield tempdir + +def pytest_addoption(parser): + parser.addoption("--relativeExePath", action="store") + \ No newline at end of file diff --git a/Tests/SystemTests/test_integration.py b/Tests/SystemTests/test_integration.py index aa957a4..cf1d751 100644 --- a/Tests/SystemTests/test_integration.py +++ b/Tests/SystemTests/test_integration.py @@ -5,46 +5,67 @@ import re import subprocess import numpy as np +import pytest from collections import defaultdict +@pytest.fixture(scope="session") +def exePath(pytestconfig): + relativePath = pytestconfig.getoption("relativeExePath") -def run_test_case_by_name(name, testdir): - """Run a test case by its case name. + assert relativePath, "--relativeExePath is required. Please " \ + "provide the (relative) path to the executable." - Args: - name: Name of the test case. - testdir: Directory for performing the simulation. 
- """ - # file where executable has been built - build_dir = 'build_skyline' + return os.path.abspath(relativePath) + + +def run_simulation_and_assert_results(testCaseName, tmpdir, exePath, resultData, run_func): + # Run the test case using the provided function (either legacy or converted JSON) + results = run_test_case_by_name(testCaseName, tmpdir, exePath, run_func) - # test file - testfile = os.path.join(os.path.dirname(__file__), 'cases', name + '.in') - testfile_tmp = os.path.join(testdir, name + '.in') + # Loop over each result check and assert + for field, seg, node, time, fun, expectedValue, rtol in resultData: + resultValue = get_result_value(results, field, seg, node, time, fun) - # copy input file to temporary test dir - shutil.copyfile(testfile, testfile_tmp) + # Assertion with details + assert resultValue == pytest.approx(expectedValue, rel=rtol, abs=1e-8), \ + f"\n\nTest failed for {testCaseName} - Field: {field}, Seg: {seg}, Node: {node}, Time: {time}, Fun: {fun}. " \ + f"\nExpected: {expectedValue}, Got: {resultValue}\n" + + +def run_test_case_by_name(name, testDir, exePath, run_func): + # test file absolute path + inputFilePath = os.path.join(os.path.dirname(__file__), 'cases', name + '.in') # run 1D simulation - run_oned(build_dir, testfile_tmp) + run_func(inputFilePath, testDir, exePath) # extract results - return read_results_1d('.', 'results_' + name + '_seg*') + return read_results_1d(testDir, 'results_' + name + '_seg*') -def run_oned(build_dir, name): - """ - Executes svOneDSolver with given input file - """ - # name of svOneDSolver executable - exe = os.path.join(build_dir, 'bin', 'OneDSolver') +def run_oned(inputFilePath, testDir, exePath): + # Run simulation in the testDir to keep the current directory clean. + # Because we are in a temporary directory, we have to pass the + # locations as absolute paths. 
+ try: + subprocess.check_output([exePath, inputFilePath], cwd=testDir) + except subprocess.CalledProcessError as err: + raise RuntimeError('Test failed. svOneDSolver returned error:\n' + err.output.decode("utf-8")) - # name of input file - inp = os.path.join('cases', name) - # run simulation +def run_converted_json_input(inputFilePath, testDir, exePath): + # Run the simulations again, but this time use the JSON input file try: - subprocess.check_output([exe, inp]) + tempJsonFilePath = "tempInput.json" + + conversionCommandList = [exePath, "-legacyToJson", f'"{inputFilePath}"', f'"{tempJsonFilePath}"'] + runCommandList = [exePath, "-jsonInput", f'"{tempJsonFilePath}"'] + + # First, convert to legacy ".in" file to a JSON input file + subprocess.check_output(conversionCommandList, cwd=testDir) + + # Now, run the actual simulation with the JSON input file + subprocess.check_output(runCommandList, cwd=testDir) except subprocess.CalledProcessError as err: raise RuntimeError('Test failed. svOneDSolver returned error:\n' + err.output.decode("utf-8")) @@ -75,7 +96,7 @@ def read_results_1d(res_dir, name): return res -def get_result(results, field, seg, node, time, fun): +def get_result_value(results, field, seg, node, time, fun): """ Read results and select function """ @@ -94,131 +115,146 @@ def get_result(results, field, seg, node, time, fun): else: raise ValueError('Unknown result type ' + fun) +# Each element in the test cases list is a tuple structured as follows: +# +# (, ) +# - : A string representing the name of the test case +# - : A list of tuples, each containing: +# (, , ,