From 28f2232ffcaf47952c54cb7e30497913b41cb53c Mon Sep 17 00:00:00 2001
From: arsLibera
Date: Sat, 1 Mar 2025 08:28:51 -0500
Subject: [PATCH 1/8] Enable direct joint node associations by using the
 jointNode options variable when the model is created.

This uses joint node associations defined in JSON input files. The legacy
serializer behavior is preserved -- instead of using the listed joint node,
we use sequential nodes from the node list to correspond to the joints.
That's how createAndRunModel used to extract the node associations.
---
 Code/Source/cvOneDModelManager.cxx            |  2 +-
 Code/Source/cvOneDModelManager.h              |  2 +-
 Code/Source/cvOneDOptionsLegacySerializer.cxx | 30 +++++++++++++++++--
 Code/Source/main.cxx                          | 28 +++++++++++++++--
 Tests/UnitTests/TestLegacySerializer.cxx      |  8 ++++-
 5 files changed, 63 insertions(+), 7 deletions(-)

diff --git a/Code/Source/cvOneDModelManager.cxx b/Code/Source/cvOneDModelManager.cxx
index 9d3887d..e495d69 100644
--- a/Code/Source/cvOneDModelManager.cxx
+++ b/Code/Source/cvOneDModelManager.cxx
@@ -188,7 +188,7 @@ int cvOneDModelManager::CreateNode(char * nodeName,double x,double y,double z){

 }

-int cvOneDModelManager::CreateJoint(char * jointName,double x,double y,double z,
+int cvOneDModelManager::CreateJoint(const char * jointName,double x,double y,double z,
                                     int numInSegs,int numOutSegs,
                                     int *InSegs,int *OutSegs){

diff --git a/Code/Source/cvOneDModelManager.h b/Code/Source/cvOneDModelManager.h
index 152dc2a..18c5f11 100644
--- a/Code/Source/cvOneDModelManager.h
+++ b/Code/Source/cvOneDModelManager.h
@@ -69,7 +69,7 @@ class cvOneDModelManager{

     // CREATE JOINT
-    int CreateJoint(char* jointName,double x,double y,double z,
+    int CreateJoint(const char* jointName,double x,double y,double z,
                     int numInSegs,int numOutSegs,
                     int *InSegs,int *OutSegs);

diff --git a/Code/Source/cvOneDOptionsLegacySerializer.cxx b/Code/Source/cvOneDOptionsLegacySerializer.cxx
index f938995..ab4ddf5 100644
--- a/Code/Source/cvOneDOptionsLegacySerializer.cxx
+++ b/Code/Source/cvOneDOptionsLegacySerializer.cxx
@@ -126,10 +126,25 @@ void readOptionsLegacyFormat(string inputFile, options* opts){
       try{
         // Get Joint Name
         opts->jointName.push_back(tokenizedString[1]);
-//      opts->jointNode.push_back(atof(tokenizedString[2].c_str()));
-        opts->jointNode.push_back(tokenizedString[2]);
+
+        // The legacy input files included a "joint node", but it is
+        // never utilized in simulation. The legacy behavior was, instead,
+        // to simply associate each joint with the sequentially listed
+        // nodeXcoord, nodeYcoord, and nodeZcoord.
+        //
+        // To preserve this legacy behavior, we're not going to deserialize
+        // the jointNode. Instead, we're going to replace "jointNode" with
+        // the sequential node item name. That's **wrong** -- we should be
+        // using "jointNode"!
+        // But it preserves the legacy behavior of the existing legacy
+        // options files when we refactor the actual utilization of the
+        // options to use the jointNode.
+        //
+        // The actual population of the jointNode option is done in a
+        // post-processing step after we finish processing the file.
         opts->jointInletName.push_back(tokenizedString[3]);
         opts->jointOutletName.push_back(tokenizedString[4]);
+
       }catch(...){
         throw cvException(string("ERROR: Invalid JOINT Format. 
Line " + to_string(lineCount) + "\n").c_str()); } @@ -361,6 +376,17 @@ void readOptionsLegacyFormat(string inputFile, options* opts){ // Increment Line Count lineCount++; } + + // Post-process: to preserve legacy behavior, we're + // making each joint node the corresponding sequential + // node from the node array. See detailed comment above. + if(opts->jointName.size() > opts->nodeName.size()){ + throw cvException("ERROR: there must be at least as many nodes as there are joints.\n"); + } + for(size_t k = 0; k < opts->jointName.size(); ++k){ + opts->jointNode.push_back(opts->nodeName.at(k)); + } + // Close File infile.close(); } diff --git a/Code/Source/main.cxx b/Code/Source/main.cxx index 26e30f0..cb6c2cb 100644 --- a/Code/Source/main.cxx +++ b/Code/Source/main.cxx @@ -31,6 +31,7 @@ #include #include +#include #include "cvOneDGlobal.h" #include "cvOneDModelManager.h" @@ -75,6 +76,23 @@ int getDataTableIDFromStringKey(string key){ // =============================== // CREATE MODEL AND RUN SIMULATION // =============================== +namespace{ + +size_t findJointNodeIndexOrThrow(const auto& jointNodeName, const auto& nodeNames, const auto& jointName){ + // Return the index of "nodeNames" that corresponds to the "jointNodeName" + // or throw a context-specific error. + + auto const iter = std::find(nodeNames.begin(), nodeNames.end(), jointNodeName); + if (iter == nodeNames.end()) { + std::string const errMsg = "ERROR: The node '" + jointNodeName + "' required by joint '" + + jointName + "' was not found in the list of nodes."; + throw cvException(errMsg.c_str()); + } + return std::distance(nodeNames.begin(), iter); +} + +} // namespace + void createAndRunModel(const cvOneD::options& opts) { // MESSAGE @@ -142,9 +160,15 @@ void createAndRunModel(const cvOneD::options& opts) { asOutlets[loopB] = opts.jointOutletList[jointOutletID][loopB]; } } + + // Find the index of the indicated node. + auto const jointName = opts.jointName.at(loopA); + auto const nodeIndex = findJointNodeIndexOrThrow( + opts.jointNode.at(loopA), opts.nodeName, jointName); + // Finally Create Joint - jointError = oned->CreateJoint((char*)opts.jointName[loopA].c_str(), - opts.nodeXcoord[loopA], opts.nodeYcoord[loopA], opts.nodeZcoord[loopA], + jointError = oned->CreateJoint(jointName.c_str(), + opts.nodeXcoord[nodeIndex], opts.nodeYcoord[nodeIndex], opts.nodeZcoord[nodeIndex], totJointInlets, totJointOutlets, asInlets, asOutlets); if(jointError == CV_ERROR) { throw cvException(string("ERROR: Error Creating JOINT " + to_string(loopA) + "\n").c_str()); diff --git a/Tests/UnitTests/TestLegacySerializer.cxx b/Tests/UnitTests/TestLegacySerializer.cxx index 5f1957a..413157e 100644 --- a/Tests/UnitTests/TestLegacySerializer.cxx +++ b/Tests/UnitTests/TestLegacySerializer.cxx @@ -99,7 +99,13 @@ cvOneD::options bifurcationOptions() { // JOINT DATA options.jointName = {"JOINT1"}; - options.jointNode = {"1"}; + + // The joint node *DOES NOT* match the joint node + // defined in the file. + // + // This is, sadly, intentional. For details, see + // the comments in the legacy deserialization function. + options.jointNode = {"0"}; // Inlet and Outlet Connections for the Joint options.jointInletName = {"INSEGS"}; From 5384d378a4375c3a12884411612c08ddc276ae3c Mon Sep 17 00:00:00 2001 From: arsLibera Date: Sat, 1 Mar 2025 09:19:22 -0500 Subject: [PATCH 2/8] Additional clean up of options: (1) eliminate direct global population from legacy serializer. The output options are (for now) part of the options struct... 
until we can house them elsewhere. This frees the options from interactions
with any global variables.
(2) Updates to the json serializer: actually use node associations now that
the model will utilize them. Also include outputType and vtkOutputType until
we move them.
---
 Code/Source/cvOneDOptions.h                   | 19 +++++------
 Code/Source/cvOneDOptionsJsonParser.cxx       |  9 +++--
 Code/Source/cvOneDOptionsJsonSerializer.cxx   | 23 ++++++-------
 Code/Source/cvOneDOptionsLegacySerializer.cxx | 22 ++++--------
 Code/Source/main.cxx                          | 34 +++++++++++++++++++
 Tests/UnitTests/TestFiles/TestInput.json      |  3 +-
 Tests/UnitTests/TestJsonSerialization.cxx     |  3 +-
 7 files changed, 71 insertions(+), 42 deletions(-)

diff --git a/Code/Source/cvOneDOptions.h b/Code/Source/cvOneDOptions.h
index 54313c9..b1a82c9 100644
--- a/Code/Source/cvOneDOptions.h
+++ b/Code/Source/cvOneDOptions.h
@@ -35,6 +35,7 @@
 #include
 #include
 #include
+#include

 #include "cvOneDTypes.h"
 #include "cvOneDException.h"
@@ -49,21 +50,14 @@ struct options{

   // NODE DATA
   cvStringVec nodeName;
-  // Note that these coordinates are also used
-  // for the joints through an implicit mapping
   cvDoubleVec nodeXcoord;
   cvDoubleVec nodeYcoord;
   cvDoubleVec nodeZcoord;

   // JOINT DATA
   cvStringVec jointName;
-
-  // "jointNode" isn't used currently -- but we want
-  // to start integrating it. It can be used to make
-  // the mapping from joint->node explicit. Right
-  // now, the mapping is implicit: each joint is
-  // associated with the node of the same index in
-  // the list of nodes.
+  // jointNode: name of the associated node that we will use
+  // to get the XYZ coordinates when creating the joint.
   cvStringVec jointNode;

   cvStringVec jointInletName;
@@ -121,7 +115,12 @@ struct options{
   double convergenceTolerance;
   int useIV;
   int useStab;
-  string outputType;
+
+  // These are to preserve legacy behavior and are
+  // expected to be eventually migrated into a
+  // post-processing step.
+  string outputType = "TEXT";
+  std::optional vtkOutputType = std::nullopt;
 };

 void validateOptions(options const& opts);

diff --git a/Code/Source/cvOneDOptionsJsonParser.cxx b/Code/Source/cvOneDOptionsJsonParser.cxx
index a165af8..0f70247 100644
--- a/Code/Source/cvOneDOptionsJsonParser.cxx
+++ b/Code/Source/cvOneDOptionsJsonParser.cxx
@@ -206,8 +206,13 @@ void parseSolverOptions(const nlohmann::ordered_json& jsonData, options& opts) t
   opts.useIV = solverOptions.at("useIV").get();
   opts.useStab = solverOptions.at("useStab").get();

-  // Doesn't seem like this is used...but we'll provide a default anyway
-  opts.outputType = solverOptions.value("outputType", "None");
+  // Until this is migrated elsewhere, we'll (optionally) store the solver options. 
+ if(solverOptions.contains("outputType")){ + opts.outputType = solverOptions.at("outputType").get(); + } + if(solverOptions.contains("vtkOutputType")){ + opts.vtkOutputType = solverOptions.at("vtkOutputType").get(); + } } catch (const std::exception& e) { throw std::runtime_error("Error parsing 'solverOptions': " + std::string(e.what())); diff --git a/Code/Source/cvOneDOptionsJsonSerializer.cxx b/Code/Source/cvOneDOptionsJsonSerializer.cxx index be9bf29..34b9737 100644 --- a/Code/Source/cvOneDOptionsJsonSerializer.cxx +++ b/Code/Source/cvOneDOptionsJsonSerializer.cxx @@ -81,29 +81,22 @@ nlohmann::ordered_json serialize_joint_data(const options& opts) { nlohmann::ordered_json j = nlohmann::ordered_json::array(); // Check consistency of the joint inlet and outlet vectors - check_consistent_size("joints", opts.jointName, + check_consistent_size("joints", opts.jointName, opts.jointNode, opts.jointInletListNames, opts.jointInletListNumber, opts.jointOutletListNames, opts.jointOutletListNumber, opts.jointInletList, opts.jointOutletList); size_t jointCount = opts.jointName.size(); - // There must be enough nodes for us to make associations. This is - // an implicit assumption made in the current joint creation code. - if( opts.nodeName.size() < jointCount ){ - throw std::runtime_error("The implicit joint-node association requires that there be at least as many nodes as there are joints."); - } - for (size_t i = 0; i < jointCount; ++i) { nlohmann::ordered_json joint; // Add JOINT data - // We're adding the node based on the index so we - // can start incorporating the explicit association - // from the implicit one currently used in the code. + // Now that we're actually populating the joint node associations + // when deserializing the legacy format, we can utilize them here. joint["joint"] = { - {"id", "J" + std::to_string(i + 1)}, - {"associatedNode", opts.nodeName.at(i)}, + {"id", opts.jointName.at(i)}, + {"associatedNode", opts.jointNode.at(i)}, {"inletName", opts.jointInletListNames.at(i)}, {"outletName", opts.jointOutletListNames.at(i)} }; @@ -225,7 +218,13 @@ nlohmann::ordered_json serializeSolverOptions(options const& opts) { solverOptions["convergenceTolerance"] = opts.convergenceTolerance; solverOptions["useIV"] = opts.useIV; solverOptions["useStab"] = opts.useStab; + + // For now, we're serializing the output data. + // In the future, we'll want to migrate these. solverOptions["outputType"] = opts.outputType; + if(opts.vtkOutputType){ + solverOptions["vtkOutputType"] = *opts.vtkOutputType; + } return solverOptions; } diff --git a/Code/Source/cvOneDOptionsLegacySerializer.cxx b/Code/Source/cvOneDOptionsLegacySerializer.cxx index ab4ddf5..594c53d 100644 --- a/Code/Source/cvOneDOptionsLegacySerializer.cxx +++ b/Code/Source/cvOneDOptionsLegacySerializer.cxx @@ -36,7 +36,6 @@ #include #include "cvOneDUtility.h" -#include "cvOneDGlobal.h" namespace cvOneD{ @@ -273,22 +272,13 @@ void readOptionsLegacyFormat(string inputFile, options* opts){ }else if(tokenizedString.size() < 2){ throw cvException(string("ERROR: Not enough parameters for OUTPUT token. 
Line " + to_string(lineCount) + "\n").c_str()); } - // Output Type - if(upper_string(tokenizedString[1]) == "TEXT"){ - cvOneDGlobal::outputType = OutputTypeScope::OUTPUT_TEXT; - }else if(upper_string(tokenizedString[1]) == "VTK"){ - cvOneDGlobal::outputType = OutputTypeScope::OUTPUT_VTK; - }else if(upper_string(tokenizedString[1]) == "BOTH"){ - cvOneDGlobal::outputType = OutputTypeScope::OUTPUT_BOTH; - }else{ - throw cvException("ERROR: Invalid OUTPUT Type.\n"); - } + + // Collect and store the output type data + opts->outputType = tokenizedString[1]; if(tokenizedString.size() > 2){ - cvOneDGlobal::vtkOutputType = atoi(tokenizedString[2].c_str()); - if(cvOneDGlobal::vtkOutputType > 1){ - throw cvException("ERROR: Invalid OUTPUT VTK Type.\n"); - } + opts->vtkOutputType = atoi(tokenizedString[2].c_str()); } + }else if(upper_string(tokenizedString[0]) == std::string("DATATABLE")){ // printf("Found Data Table.\n"); try{ @@ -379,7 +369,7 @@ void readOptionsLegacyFormat(string inputFile, options* opts){ // Post-process: to preserve legacy behavior, we're // making each joint node the corresponding sequential - // node from the node array. See detailed comment above. + // node from the node array. See comment above for additional details. if(opts->jointName.size() > opts->nodeName.size()){ throw cvException("ERROR: there must be at least as many nodes as there are joints.\n"); } diff --git a/Code/Source/main.cxx b/Code/Source/main.cxx index cb6c2cb..9118148 100644 --- a/Code/Source/main.cxx +++ b/Code/Source/main.cxx @@ -331,6 +331,32 @@ void createAndRunModel(const cvOneD::options& opts) { // ============== // RUN ONEDSOLVER // ============== + +namespace { + +void setOutputGlobals(const cvOneD::options& opts){ + + if(upper_string(opts.outputType) == "TEXT"){ + cvOneDGlobal::outputType = OutputTypeScope::OUTPUT_TEXT; + }else if(upper_string(opts.outputType) == "VTK"){ + cvOneDGlobal::outputType = OutputTypeScope::OUTPUT_VTK; + }else if(upper_string(opts.outputType) == "BOTH"){ + cvOneDGlobal::outputType = OutputTypeScope::OUTPUT_BOTH; + }else{ + throw cvException("ERROR: Invalid OUTPUT Type.\n"); + } + + if(opts.vtkOutputType){ + cvOneDGlobal::vtkOutputType = *opts.vtkOutputType; + if(cvOneDGlobal::vtkOutputType > 1){ + throw cvException("ERROR: Invalid OUTPUT VTK Type.\n"); + } + } + +} + +} // namespace + void runOneDSolver(const cvOneD::options& opts){ // Model Checking @@ -345,6 +371,14 @@ void runOneDSolver(const cvOneD::options& opts){ string jsonFilename("echo.json"); cvOneD::writeJsonOptions(opts, jsonFilename); + // Per the existing behavior, we'll set output globals + // from the options. TODO: we should really just + // consume option data from the options rather + // than setting it in a global variable. Besides that, + // we should move the VTK options to a postprocessor + // rather than have them in the solver options. 
+ setOutputGlobals(opts); + // Create Model and Run Simulation createAndRunModel(opts); diff --git a/Tests/UnitTests/TestFiles/TestInput.json b/Tests/UnitTests/TestFiles/TestInput.json index e4adfa6..59c49b7 100644 --- a/Tests/UnitTests/TestFiles/TestInput.json +++ b/Tests/UnitTests/TestFiles/TestInput.json @@ -10,7 +10,8 @@ "convergenceTolerance": 1e-06, "useIV": 1, "useStab": 0, - "outputType": "NONE" + "outputType": "SOME OUTPUT TYPE", + "vtkOutputType": 23 }, "materials": [ { diff --git a/Tests/UnitTests/TestJsonSerialization.cxx b/Tests/UnitTests/TestJsonSerialization.cxx index e8de928..66831d1 100644 --- a/Tests/UnitTests/TestJsonSerialization.cxx +++ b/Tests/UnitTests/TestJsonSerialization.cxx @@ -81,7 +81,8 @@ cvOneD::options test_input_options(){ opts.convergenceTolerance = 1.0e-6; opts.useIV = 1; opts.useStab = 0; - opts.outputType = "NONE"; + opts.outputType = "SOME OUTPUT TYPE"; + opts.vtkOutputType = 23; return opts; } From 5a56f8a8654568ac3f739b5dcbfd00a47d308573 Mon Sep 17 00:00:00 2001 From: arsLibera Date: Sun, 2 Mar 2025 07:28:10 -0500 Subject: [PATCH 3/8] replace hardcoded requirements on binary location in system tests (test_integration.py) with an option for the binary location passed to pytest. That enables simple running of pytest regardless of the particular location of the binary directory. For example: pytest Tests/SystemTests --relativeExePath="../svOneDSolver_build/bin/OneDSolver" Also: make the system tests actually execute tests in the temporary directory so that we don't have artifacts generated in whatever directory we run the tests from. --- .github/workflows/test.yml | 3 +- CMakeLists.txt | 35 ++++++----- Code/Source/main.cxx | 14 +++-- Tests/SystemTests/.gitignore | 1 + Tests/SystemTests/conftest.py | 4 ++ Tests/SystemTests/test_integration.py | 84 ++++++++++++++------------- 6 files changed, 78 insertions(+), 63 deletions(-) create mode 100644 Tests/SystemTests/.gitignore diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 6aa6a79..4d8444e 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -19,7 +19,8 @@ jobs: pip install numpy - name: result tests run: | - pytest Tests/SystemTests + # We're providing the path to the EXE based on the prior build solver step. 
+ pytest Tests/SystemTests --relativeExePath="build_skyline/bin/OneDSolver" - name: result unit tests run: | cd build_skyline diff --git a/CMakeLists.txt b/CMakeLists.txt index 339d4f2..c923e55 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,5 +1,5 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.11) +CMAKE_MINIMUM_REQUIRED(VERSION 3.18) # COMMON SETTINGS PROJECT(OneDSolver) @@ -71,6 +71,17 @@ ELSEIF(sparseSolverType STREQUAL "csparse") "${CMAKE_CURRENT_SOURCE_DIR}/Code/Source/sparse/csparse/*.h") ENDIF() +# NLOHMANN JSON FOR SERIALIZATION +include(FetchContent) +FetchContent_Declare( + nlohmann_json + GIT_REPOSITORY https://github.com/nlohmann/json.git + GIT_TAG v3.11.2 # You can specify a version or use the latest tag +) +FetchContent_MakeAvailable(nlohmann_json) +# Global include for nlohmann headers +include_directories(${nlohmann_json_SOURCE_DIR}/single_include) + # COPY DATASET FOLDER # FILE(COPY "${CMAKE_CURRENT_SOURCE_DIR}/tests" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}") @@ -88,9 +99,14 @@ IF(buildDocs) ADD_SUBDIRECTORY(${DOCS_DIR}) ENDIF() + # INSTALL PYTHON INTERFACE IF(buildPy) + find_package(Python3 REQUIRED COMPONENTS Interpreter Development.Module) + + include_directories(${CMAKE_CURRENT_SOURCE_DIR}/Code/Source) + # SWIG INTERFACE FILES FILE(GLOB ONED_SRC_I "${CMAKE_CURRENT_SOURCE_DIR}/Code/Source/oneDSolver.i") @@ -111,16 +127,6 @@ if (BUILD_SV_INSTALLER) add_subdirectory("${CMAKE_SOURCE_DIR}/Distribution") ENDIF() -# NLOHMANN JSON FOR SERIALIZATION -include(FetchContent) -FetchContent_Declare( - nlohmann_json - GIT_REPOSITORY https://github.com/nlohmann/json.git - GIT_TAG v3.11.2 # You can specify a version or use the latest tag -) -FetchContent_MakeAvailable(nlohmann_json) -target_include_directories(${PROJECT_NAME} PUBLIC ${nlohmann_json_SOURCE_DIR}/single_include) - # Unit tests and Google Test if(ENABLE_UNIT_TEST) @@ -174,13 +180,6 @@ if(ENABLE_UNIT_TEST) # handle both use cases. target_link_libraries(UnitTestsExecutable gtest gtest_main pthread) - # We need to include Nlohmann when compiling the unit test executable - # A better strategy would be to build up a list of what we're going to - # include and reuse it. - # Another option: we could simply build the library of cvOneD functionality - # and link against that both here and in the main executable. - target_include_directories(UnitTestsExecutable PRIVATE ${nlohmann_json_SOURCE_DIR}/single_include) - # It's likely we'll need to include additional directories for some # versions of the unit tests. That can be updated when/if we update # the general strategy. diff --git a/Code/Source/main.cxx b/Code/Source/main.cxx index 9118148..c798340 100644 --- a/Code/Source/main.cxx +++ b/Code/Source/main.cxx @@ -384,7 +384,7 @@ void runOneDSolver(const cvOneD::options& opts){ } -void convertOptions(const std::string& legacyFilename, const std::string& jsonFilename){ +void convertLegacyToJsonOptions(const std::string& legacyFilename, const std::string& jsonFilename){ // Convert legacy format to JSON and print it to file cvOneD::options opts{}; cvOneD::readOptionsLegacyFormat(legacyFilename,&opts); @@ -426,6 +426,12 @@ ArgOptions parseInputArgs(int argc, char** argv) { return options; } +cvOneD::options readLegacyOptions(std::string const& inputFile){ + cvOneD::options opts{}; + cvOneD::readOptionsLegacyFormat(inputFile,&opts); + return opts; +} + // Parse the incoming arguments and read the options file. // Also performs option file conversions if requested by user. 
// @@ -449,9 +455,7 @@ std::optional parseArgsAndHandleOptions(int argc, char** argv){ // Legacy behavior if(argc == 2){ string inputFile(argv[1]); - cvOneD::options opts{}; - cvOneD::readOptionsLegacyFormat(inputFile,&opts); - return opts; + return readLegacyOptions(inputFile); } // Default behavior @@ -459,7 +463,7 @@ std::optional parseArgsAndHandleOptions(int argc, char** argv){ // Conversion if(argOptions.legacyConversionInput && argOptions.jsonConversionOutput){ - convertOptions(*argOptions.legacyConversionInput, + convertLegacyToJsonOptions(*argOptions.legacyConversionInput, *argOptions.jsonConversionOutput); } diff --git a/Tests/SystemTests/.gitignore b/Tests/SystemTests/.gitignore new file mode 100644 index 0000000..ba0430d --- /dev/null +++ b/Tests/SystemTests/.gitignore @@ -0,0 +1 @@ +__pycache__/ \ No newline at end of file diff --git a/Tests/SystemTests/conftest.py b/Tests/SystemTests/conftest.py index 242abfb..75cb82e 100644 --- a/Tests/SystemTests/conftest.py +++ b/Tests/SystemTests/conftest.py @@ -8,3 +8,7 @@ def tempdir(): """Temporary directory for test purposes.""" with TemporaryDirectory() as tempdir: yield tempdir + +def pytest_addoption(parser): + parser.addoption("--relativeExePath", action="store") + \ No newline at end of file diff --git a/Tests/SystemTests/test_integration.py b/Tests/SystemTests/test_integration.py index aa957a4..ba2de4a 100644 --- a/Tests/SystemTests/test_integration.py +++ b/Tests/SystemTests/test_integration.py @@ -5,18 +5,25 @@ import re import subprocess import numpy as np +import pytest from collections import defaultdict +@pytest.fixture(scope="session") +def exePath(pytestconfig): + relativePath = pytestconfig.getoption("relativeExePath") -def run_test_case_by_name(name, testdir): + assert relativePath, "--relativeExePath is required. Please " \ + "provide the (relative) path to the executable." + + return os.path.abspath(relativePath) + +def run_test_case_by_name(name, testdir, exePath): """Run a test case by its case name. Args: name: Name of the test case. testdir: Directory for performing the simulation. """ - # file where executable has been built - build_dir = 'build_skyline' # test file testfile = os.path.join(os.path.dirname(__file__), 'cases', name + '.in') @@ -26,25 +33,25 @@ def run_test_case_by_name(name, testdir): shutil.copyfile(testfile, testfile_tmp) # run 1D simulation - run_oned(build_dir, testfile_tmp) + run_oned(testfile_tmp, testdir, exePath) # extract results - return read_results_1d('.', 'results_' + name + '_seg*') + return read_results_1d(testdir, 'results_' + name + '_seg*') -def run_oned(build_dir, name): +def run_oned(name, testdir, exePath): """ Executes svOneDSolver with given input file """ - # name of svOneDSolver executable - exe = os.path.join(build_dir, 'bin', 'OneDSolver') - # name of input file - inp = os.path.join('cases', name) + # Absolute path of the input file + inp = os.path.join(testdir, name) - # run simulation + # Run simulation in the testdir to keep the current directory clean. + # Because we are in a temporary directory, we have to pass the + # locations as absolute paths. try: - subprocess.check_output([exe, inp]) + subprocess.check_output([exePath, inp], cwd=testdir) except subprocess.CalledProcessError as err: raise RuntimeError('Test failed. 
svOneDSolver returned error:\n' + err.output.decode("utf-8")) @@ -94,73 +101,72 @@ def get_result(results, field, seg, node, time, fun): else: raise ValueError('Unknown result type ' + fun) - -def test_tube_pressure(tmpdir): - results = run_test_case_by_name('tube_pressure', tmpdir) +def test_tube_pressure(tmpdir, exePath): + results = run_test_case_by_name('tube_pressure', tmpdir, exePath) assert np.isclose(get_result(results, 'pressure', 0, 0, -1, 'point'), 11005.30965, rtol=1.0e-7) assert np.isclose(get_result(results, 'pressure', 0, -1, -1, 'point'), 10000.0, rtol=1.0e-8) assert np.isclose(get_result(results, 'flow', 0, -1, -1, 'point'), 100.0, rtol=1.0e-16) assert np.isclose(get_result(results, 'area', 0, -1, -1, 'point'), 1.0, rtol=1.0e-5) -def test_tube_pressure_wave(tmpdir): - results = run_test_case_by_name('tube_pressure_wave', tmpdir) +def test_tube_pressure_wave(tmpdir, exePath): + results = run_test_case_by_name('tube_pressure_wave', tmpdir, exePath) assert np.isclose(get_result(results, 'pressure', 0, 0, -1, 'point'), 10000.0 , rtol=1.0e-8) assert np.isclose(get_result(results, 'pressure', 0, -1, -1, 'point'), 9086.52306835, rtol=1.0e-4) assert np.isclose(get_result(results, 'flow', 0, -1, -1, 'point'), 90.8652306835, rtol=1.0e-4) assert np.isclose(get_result(results, 'area', 0, -1, -1, 'point'), 1.0, rtol=1.0e-5) -def test_tube_rcr(tmpdir): - results = run_test_case_by_name('tube_rcr', tmpdir) +def test_tube_rcr(tmpdir, exePath): + results = run_test_case_by_name('tube_rcr', tmpdir, exePath) assert np.isclose(get_result(results, 'pressure', 0, 0, -1, 'point'), 11005.30965, rtol=1.0e-7) assert np.isclose(get_result(results, 'pressure', 0, -1, -1, 'point'), 10000.0, rtol=1.0e-8) assert np.isclose(get_result(results, 'flow', 0, -1, -1, 'point'), 100.0, rtol=1.0e-16) assert np.isclose(get_result(results, 'area', 0, -1, -1, 'point'), 1.0, rtol=1.0e-5) -def test_tube_rcr_Pd(tmpdir): - results = run_test_case_by_name('tube_rcr_Pd', tmpdir) +def test_tube_rcr_Pd(tmpdir, exePath): + results = run_test_case_by_name('tube_rcr_Pd', tmpdir, exePath) assert np.isclose(get_result(results, 'pressure', 0, 0, -1, 'point'), 12005.30965, rtol=1.0e-7) assert np.isclose(get_result(results, 'pressure', 0, -1, -1, 'point'), 11000.0, rtol=1.0e-8) assert np.isclose(get_result(results, 'flow', 0, -1, -1, 'point'), 100.0, rtol=1.0e-16) assert np.isclose(get_result(results, 'area', 0, -1, -1, 'point'), 1.0, rtol=1.0e-5) -def test_tube_r(tmpdir): - results = run_test_case_by_name('tube_r', tmpdir) +def test_tube_r(tmpdir, exePath): + results = run_test_case_by_name('tube_r', tmpdir, exePath) assert np.isclose(get_result(results, 'pressure', 0, 0, -1, 'point'), 11005.30965, rtol=1.0e-7) assert np.isclose(get_result(results, 'pressure', 0, -1, -1, 'point'), 10000.0, rtol=1.0e-8) assert np.isclose(get_result(results, 'flow', 0, -1, -1, 'point'), 100.0, rtol=1.0e-16) assert np.isclose(get_result(results, 'area', 0, -1, -1, 'point'), 1.0, rtol=1.0e-5) -def test_tube_r_Pd(tmpdir): - results = run_test_case_by_name('tube_r_Pd', tmpdir) +def test_tube_r_Pd(tmpdir, exePath): + results = run_test_case_by_name('tube_r_Pd', tmpdir, exePath) assert np.isclose(get_result(results, 'pressure', 0, 0, -1, 'point'), 12005.30965, rtol=1.0e-7) assert np.isclose(get_result(results, 'pressure', 0, -1, -1, 'point'), 11000.0, rtol=1.0e-8) assert np.isclose(get_result(results, 'flow', 0, -1, -1, 'point'), 100.0, rtol=1.0e-16) assert np.isclose(get_result(results, 'area', 0, -1, -1, 'point'), 1.0, rtol=1.0e-5) -def 
test_tube_r_stab(tmpdir): - results = run_test_case_by_name('tube_r_stab', tmpdir) +def test_tube_r_stab(tmpdir, exePath): + results = run_test_case_by_name('tube_r_stab', tmpdir, exePath) assert np.isclose(get_result(results, 'pressure', 0, 0, -1, 'point'), 11005.30965, rtol=1.0e-7) assert np.isclose(get_result(results, 'pressure', 0, -1, -1, 'point'), 10000.0, rtol=1.0e-8) assert np.isclose(get_result(results, 'flow', 0, -1, -1, 'point'), 100.0, rtol=1.0e-16) assert np.isclose(get_result(results, 'area', 0, -1, -1, 'point'), 1.0, rtol=1.0e-5) -def test_tube_stenosis_r(tmpdir): - results = run_test_case_by_name('tube_stenosis_r', tmpdir) +def test_tube_stenosis_r(tmpdir, exePath): + results = run_test_case_by_name('tube_stenosis_r', tmpdir, exePath) assert np.isclose(get_result(results, 'pressure', 0, 0, -1, 'point'), 10150.68211, rtol=1.0e-6) assert np.isclose(get_result(results, 'pressure', 2, -1, -1, 'point'), 10000.0, rtol=1.0e-8) assert np.isclose(get_result(results, 'flow', 0, -1, -1, 'point'), 100.0, rtol=1.0e-10) assert np.isclose(get_result(results, 'area', 0, -1, -1, 'point'), 10.0, rtol=1.0e-4) -def test_bifurcation_P(tmpdir): - results = run_test_case_by_name('bifurcation_P', tmpdir) +def test_bifurcation_P(tmpdir, exePath): + results = run_test_case_by_name('bifurcation_P', tmpdir, exePath) assert np.isclose(get_result(results, 'pressure', 0, 0, -1, 'point'), 4039.45953118937, rtol=1e-5) assert np.isclose(get_result(results, 'pressure', 0, -1, -1, 'point'), 4026.67220709878, rtol=1e-5) assert np.isclose(get_result(results, 'pressure', 1, 0, -1, 'point'), 4026.67220709878, rtol=1e-5) @@ -171,8 +177,8 @@ def test_bifurcation_P(tmpdir): assert np.isclose(get_result(results, 'flow', 2, -1, -1, 'point'), 3.9925, rtol=1e-6) -def test_bifurcation_R(tmpdir): - results = run_test_case_by_name('bifurcation_R', tmpdir) +def test_bifurcation_R(tmpdir, exePath): + results = run_test_case_by_name('bifurcation_R', tmpdir, exePath) assert np.isclose(get_result(results, 'pressure', 0, 0, -1, 'point'), 3997.46433118937, rtol=1e-5) assert np.isclose(get_result(results, 'pressure', 0, -1, -1, 'point'), 3984.67700709878, rtol=1e-5) assert np.isclose(get_result(results, 'pressure', 1, 0, -1, 'point'), 3984.67700709878, rtol=1e-5) @@ -183,8 +189,8 @@ def test_bifurcation_R(tmpdir): assert np.isclose(get_result(results, 'flow', 2, -1, -1, 'point'), 3.9925, rtol=1e-5) -def test_bifurcation_R_stab(tmpdir): - results = run_test_case_by_name('bifurcation_R_stab', tmpdir) +def test_bifurcation_R_stab(tmpdir, exePath): + results = run_test_case_by_name('bifurcation_R_stab', tmpdir, exePath) assert np.isclose(get_result(results, 'pressure', 0, 0, -1, 'point'), 3997.46433118937, rtol=1e-6) assert np.isclose(get_result(results, 'pressure', 0, -1, -1, 'point'), 3984.67700709878, rtol=1e-6) assert np.isclose(get_result(results, 'pressure', 1, 0, -1, 'point'), 3984.67700709878, rtol=1e-6) @@ -195,8 +201,8 @@ def test_bifurcation_R_stab(tmpdir): assert np.isclose(get_result(results, 'flow', 2, -1, -1, 'point'), 3.9925, rtol=1e-6) -def test_bifurcation_RCR(tmpdir): - results = run_test_case_by_name('bifurcation_RCR', tmpdir) +def test_bifurcation_RCR(tmpdir, exePath): + results = run_test_case_by_name('bifurcation_RCR', tmpdir, exePath) assert np.isclose(get_result(results, 'pressure', 0, 0, np.arange(100, 200), 'mean'), 123878.022943, rtol=1e-7) assert np.isclose(get_result(results, 'pressure', 0, 0, np.arange(100, 200), 'max'), 168182.372624, rtol=1e-7) assert np.isclose(get_result(results, 'pressure', 0, 0, 
np.arange(100, 200), 'min'), 89237.6441223, rtol=1e-7) @@ -212,8 +218,8 @@ def test_bifurcation_RCR(tmpdir): assert np.isclose(get_result(results, 'flow', 1, -1, np.arange(100, 200), 'min'), -3.35029015773, rtol=1e-7) -def test_bifurcation_RCR_staticFunc(tmpdir): - results = run_test_case_by_name('bifurcation_RCR_staticFunc', tmpdir) +def test_bifurcation_RCR_staticFunc(tmpdir, exePath): + results = run_test_case_by_name('bifurcation_RCR_staticFunc', tmpdir, exePath) assert np.isclose(get_result(results, 'pressure', 0, 0, -1, 'point'), 3997.46433118937, rtol=1e-6,) assert np.isclose(get_result(results, 'pressure', 0, -1, -1, 'point'), 3984.67700709878, rtol=1e-6,) assert np.isclose(get_result(results, 'pressure', 1, 0, -1, 'point'), 3984.67700709878, rtol=1e-6,) From e8ecb82e8d75e6319bfd676fcab794222685f24f Mon Sep 17 00:00:00 2001 From: arsLibera Date: Sun, 2 Mar 2025 09:27:59 -0500 Subject: [PATCH 4/8] add system tests for conversion to JSON input files and simulation using JSON input files. Add documentation for the executable usage and running unit and system tests --- .github/workflows/test.yml | 6 +- .gitignore | 3 + CMakeLists.txt | 5 - Code/Source/main.cxx | 16 +- README.md | 72 ++++++ Tests/SystemTests/test_integration.py | 332 ++++++++++++++------------ 6 files changed, 271 insertions(+), 163 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 4d8444e..9b67531 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -15,12 +15,12 @@ jobs: make - name: install dependencies run: | - pip install pytest pytest-cov pytest-mock + pip install pytest pytest-cov pytest-mock pytest-xdist pip install numpy - - name: result tests + - name: result simulation system tests run: | # We're providing the path to the EXE based on the prior build solver step. - pytest Tests/SystemTests --relativeExePath="build_skyline/bin/OneDSolver" + pytest Tests/SystemTests -n auto --relativeExePath="build_skyline/bin/OneDSolver" - name: result unit tests run: | cd build_skyline diff --git a/.gitignore b/.gitignore index c28e83e..e0a89cb 100644 --- a/.gitignore +++ b/.gitignore @@ -41,3 +41,6 @@ build # Vscode .vscode/ + +# Python +venv/ diff --git a/CMakeLists.txt b/CMakeLists.txt index c923e55..07377d6 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -99,14 +99,9 @@ IF(buildDocs) ADD_SUBDIRECTORY(${DOCS_DIR}) ENDIF() - # INSTALL PYTHON INTERFACE IF(buildPy) - find_package(Python3 REQUIRED COMPONENTS Interpreter Development.Module) - - include_directories(${CMAKE_CURRENT_SOURCE_DIR}/Code/Source) - # SWIG INTERFACE FILES FILE(GLOB ONED_SRC_I "${CMAKE_CURRENT_SOURCE_DIR}/Code/Source/oneDSolver.i") diff --git a/Code/Source/main.cxx b/Code/Source/main.cxx index c798340..916b636 100644 --- a/Code/Source/main.cxx +++ b/Code/Source/main.cxx @@ -402,6 +402,14 @@ struct ArgOptions{ std::optional jsonConversionOutput = std::nullopt; }; +std::string removeQuotesIfPresent(const std::string& str) { + std::string result = str; + if (result.front() == '"' && result.back() == '"') { + result = result.substr(1, result.length() - 2); + } + return result; +} + ArgOptions parseInputArgs(int argc, char** argv) { // Right now, we don't check for bad arguments but we could // do that in here if we want more coherent behavior. 
@@ -411,13 +419,13 @@ ArgOptions parseInputArgs(int argc, char** argv) {
     std::string arg = argv[i];

     if (arg == "-jsonInput" && i + 1 < argc) {
-      options.jsonInput = argv[i + 1];
+      options.jsonInput = removeQuotesIfPresent(argv[i + 1]);
       i++;
     }

     if (arg == "-legacyToJson" && i + 2 < argc) {
-      options.legacyConversionInput = argv[i + 1];
-      options.jsonConversionOutput = argv[i + 2];
+      options.legacyConversionInput = removeQuotesIfPresent(argv[i + 1]);
+      options.jsonConversionOutput = removeQuotesIfPresent(argv[i + 2]);
       i++;
     }

@@ -454,7 +462,7 @@ std::optional parseArgsAndHandleOptions(int argc, char** argv){

   // Legacy behavior
   if(argc == 2){
-    string inputFile(argv[1]);
+    string inputFile{removeQuotesIfPresent(argv[1])};
     return readLegacyOptions(inputFile);
   }

diff --git a/README.md b/README.md
index ea56eed..42b3005 100755
--- a/README.md
+++ b/README.md
@@ -49,6 +49,78 @@ make -j n

 where n is the number of processors you want to use to build.

+### Basic usage
+
+There are three supported usages for the generated executable.
+* The quotations around the input args are optional
+* Input file paths can be specified as a local file name or an absolute path
+
+#### 1. Run simulation using a legacy input file
+Run the simulation with a legacy ".in" input file.
+~~~
+svOneDSolver "<path to legacy input file>.in"
+~~~
+
+#### 2. Convert legacy input file to JSON
+Convert a legacy input file to a JSON input file.
+~~~
+svOneDSolver -legacyToJson "<path to legacy input file>.in" "<path to JSON output file>.json"
+~~~
+
+#### 3. Run simulation using a JSON input file
+Run the simulation using a JSON input file.
+~~~
+svOneDSolver -jsonInput "<path to JSON input file>.json"
+~~~
+
+
+### Run tests
+
+System and unit tests can be run using the ctest and pytest commands.
+
+The following assumes:
+* A source folder "svOneDSolver/" and a neighboring build folder "svOneDSolver_build/"
+* The source has been built using the "cmake" and "make" commands (as described above)
+
+#### Unit tests
+Navigate to the build folder and run the ctest command.
+
+~~~
+ctest
+~~~
+
+#### System tests
+The system tests run simulations and verify some of the resulting output.
+
+You must have python and pytest installed.
+
+Navigate to the source folder, and run the following:
+
+~~~
+pytest Tests/SystemTests --relativeExePath="../svOneDSolver_build/bin/OneDSolver"
+~~~
+
+If the binary is in a different location, specify the correct location.
+
+
+#### Running system tests in parallel
+
+This offers some details for how to set up a python environment and install the relevant packages on linux, and then run the tests in parallel. Note that, depending on the OS, this setup process will vary, but the basic steps will remain the same.
+
+Navigate to the source folder, and run the following:
+~~~
+sudo apt install python3-venv
+python3 -m venv venv
+source venv/bin/activate
+pip install numpy pytest-xdist
+~~~
+
+With xdist available, run the tests in parallel (`-n <number of processes>`).
+~~~
+pytest Tests/SystemTests --relativeExePath="../svOneDSolver_build/bin/OneDSolver" -n 6
+~~~
+ + ### CMake Options Three options are available in CMake: diff --git a/Tests/SystemTests/test_integration.py b/Tests/SystemTests/test_integration.py index ba2de4a..cf1d751 100644 --- a/Tests/SystemTests/test_integration.py +++ b/Tests/SystemTests/test_integration.py @@ -17,41 +17,55 @@ def exePath(pytestconfig): return os.path.abspath(relativePath) -def run_test_case_by_name(name, testdir, exePath): - """Run a test case by its case name. - Args: - name: Name of the test case. - testdir: Directory for performing the simulation. - """ +def run_simulation_and_assert_results(testCaseName, tmpdir, exePath, resultData, run_func): + # Run the test case using the provided function (either legacy or converted JSON) + results = run_test_case_by_name(testCaseName, tmpdir, exePath, run_func) - # test file - testfile = os.path.join(os.path.dirname(__file__), 'cases', name + '.in') - testfile_tmp = os.path.join(testdir, name + '.in') + # Loop over each result check and assert + for field, seg, node, time, fun, expectedValue, rtol in resultData: + resultValue = get_result_value(results, field, seg, node, time, fun) - # copy input file to temporary test dir - shutil.copyfile(testfile, testfile_tmp) + # Assertion with details + assert resultValue == pytest.approx(expectedValue, rel=rtol, abs=1e-8), \ + f"\n\nTest failed for {testCaseName} - Field: {field}, Seg: {seg}, Node: {node}, Time: {time}, Fun: {fun}. " \ + f"\nExpected: {expectedValue}, Got: {resultValue}\n" - # run 1D simulation - run_oned(testfile_tmp, testdir, exePath) - # extract results - return read_results_1d(testdir, 'results_' + name + '_seg*') +def run_test_case_by_name(name, testDir, exePath, run_func): + # test file absolute path + inputFilePath = os.path.join(os.path.dirname(__file__), 'cases', name + '.in') + # run 1D simulation + run_func(inputFilePath, testDir, exePath) -def run_oned(name, testdir, exePath): - """ - Executes svOneDSolver with given input file - """ + # extract results + return read_results_1d(testDir, 'results_' + name + '_seg*') - # Absolute path of the input file - inp = os.path.join(testdir, name) - # Run simulation in the testdir to keep the current directory clean. +def run_oned(inputFilePath, testDir, exePath): + # Run simulation in the testDir to keep the current directory clean. # Because we are in a temporary directory, we have to pass the # locations as absolute paths. try: - subprocess.check_output([exePath, inp], cwd=testdir) + subprocess.check_output([exePath, inputFilePath], cwd=testDir) + except subprocess.CalledProcessError as err: + raise RuntimeError('Test failed. svOneDSolver returned error:\n' + err.output.decode("utf-8")) + + +def run_converted_json_input(inputFilePath, testDir, exePath): + # Run the simulations again, but this time use the JSON input file + try: + tempJsonFilePath = "tempInput.json" + + conversionCommandList = [exePath, "-legacyToJson", f'"{inputFilePath}"', f'"{tempJsonFilePath}"'] + runCommandList = [exePath, "-jsonInput", f'"{tempJsonFilePath}"'] + + # First, convert to legacy ".in" file to a JSON input file + subprocess.check_output(conversionCommandList, cwd=testDir) + + # Now, run the actual simulation with the JSON input file + subprocess.check_output(runCommandList, cwd=testDir) except subprocess.CalledProcessError as err: raise RuntimeError('Test failed. 
svOneDSolver returned error:\n' + err.output.decode("utf-8")) @@ -82,7 +96,7 @@ def read_results_1d(res_dir, name): return res -def get_result(results, field, seg, node, time, fun): +def get_result_value(results, field, seg, node, time, fun): """ Read results and select function """ @@ -101,130 +115,146 @@ def get_result(results, field, seg, node, time, fun): else: raise ValueError('Unknown result type ' + fun) -def test_tube_pressure(tmpdir, exePath): - results = run_test_case_by_name('tube_pressure', tmpdir, exePath) - assert np.isclose(get_result(results, 'pressure', 0, 0, -1, 'point'), 11005.30965, rtol=1.0e-7) - assert np.isclose(get_result(results, 'pressure', 0, -1, -1, 'point'), 10000.0, rtol=1.0e-8) - assert np.isclose(get_result(results, 'flow', 0, -1, -1, 'point'), 100.0, rtol=1.0e-16) - assert np.isclose(get_result(results, 'area', 0, -1, -1, 'point'), 1.0, rtol=1.0e-5) - - -def test_tube_pressure_wave(tmpdir, exePath): - results = run_test_case_by_name('tube_pressure_wave', tmpdir, exePath) - assert np.isclose(get_result(results, 'pressure', 0, 0, -1, 'point'), 10000.0 , rtol=1.0e-8) - assert np.isclose(get_result(results, 'pressure', 0, -1, -1, 'point'), 9086.52306835, rtol=1.0e-4) - assert np.isclose(get_result(results, 'flow', 0, -1, -1, 'point'), 90.8652306835, rtol=1.0e-4) - assert np.isclose(get_result(results, 'area', 0, -1, -1, 'point'), 1.0, rtol=1.0e-5) - - -def test_tube_rcr(tmpdir, exePath): - results = run_test_case_by_name('tube_rcr', tmpdir, exePath) - assert np.isclose(get_result(results, 'pressure', 0, 0, -1, 'point'), 11005.30965, rtol=1.0e-7) - assert np.isclose(get_result(results, 'pressure', 0, -1, -1, 'point'), 10000.0, rtol=1.0e-8) - assert np.isclose(get_result(results, 'flow', 0, -1, -1, 'point'), 100.0, rtol=1.0e-16) - assert np.isclose(get_result(results, 'area', 0, -1, -1, 'point'), 1.0, rtol=1.0e-5) - - -def test_tube_rcr_Pd(tmpdir, exePath): - results = run_test_case_by_name('tube_rcr_Pd', tmpdir, exePath) - assert np.isclose(get_result(results, 'pressure', 0, 0, -1, 'point'), 12005.30965, rtol=1.0e-7) - assert np.isclose(get_result(results, 'pressure', 0, -1, -1, 'point'), 11000.0, rtol=1.0e-8) - assert np.isclose(get_result(results, 'flow', 0, -1, -1, 'point'), 100.0, rtol=1.0e-16) - assert np.isclose(get_result(results, 'area', 0, -1, -1, 'point'), 1.0, rtol=1.0e-5) - - -def test_tube_r(tmpdir, exePath): - results = run_test_case_by_name('tube_r', tmpdir, exePath) - assert np.isclose(get_result(results, 'pressure', 0, 0, -1, 'point'), 11005.30965, rtol=1.0e-7) - assert np.isclose(get_result(results, 'pressure', 0, -1, -1, 'point'), 10000.0, rtol=1.0e-8) - assert np.isclose(get_result(results, 'flow', 0, -1, -1, 'point'), 100.0, rtol=1.0e-16) - assert np.isclose(get_result(results, 'area', 0, -1, -1, 'point'), 1.0, rtol=1.0e-5) - - -def test_tube_r_Pd(tmpdir, exePath): - results = run_test_case_by_name('tube_r_Pd', tmpdir, exePath) - assert np.isclose(get_result(results, 'pressure', 0, 0, -1, 'point'), 12005.30965, rtol=1.0e-7) - assert np.isclose(get_result(results, 'pressure', 0, -1, -1, 'point'), 11000.0, rtol=1.0e-8) - assert np.isclose(get_result(results, 'flow', 0, -1, -1, 'point'), 100.0, rtol=1.0e-16) - assert np.isclose(get_result(results, 'area', 0, -1, -1, 'point'), 1.0, rtol=1.0e-5) - - -def test_tube_r_stab(tmpdir, exePath): - results = run_test_case_by_name('tube_r_stab', tmpdir, exePath) - assert np.isclose(get_result(results, 'pressure', 0, 0, -1, 'point'), 11005.30965, rtol=1.0e-7) - assert np.isclose(get_result(results, 
'pressure', 0, -1, -1, 'point'), 10000.0, rtol=1.0e-8) - assert np.isclose(get_result(results, 'flow', 0, -1, -1, 'point'), 100.0, rtol=1.0e-16) - assert np.isclose(get_result(results, 'area', 0, -1, -1, 'point'), 1.0, rtol=1.0e-5) - - -def test_tube_stenosis_r(tmpdir, exePath): - results = run_test_case_by_name('tube_stenosis_r', tmpdir, exePath) - assert np.isclose(get_result(results, 'pressure', 0, 0, -1, 'point'), 10150.68211, rtol=1.0e-6) - assert np.isclose(get_result(results, 'pressure', 2, -1, -1, 'point'), 10000.0, rtol=1.0e-8) - assert np.isclose(get_result(results, 'flow', 0, -1, -1, 'point'), 100.0, rtol=1.0e-10) - assert np.isclose(get_result(results, 'area', 0, -1, -1, 'point'), 10.0, rtol=1.0e-4) - - -def test_bifurcation_P(tmpdir, exePath): - results = run_test_case_by_name('bifurcation_P', tmpdir, exePath) - assert np.isclose(get_result(results, 'pressure', 0, 0, -1, 'point'), 4039.45953118937, rtol=1e-5) - assert np.isclose(get_result(results, 'pressure', 0, -1, -1, 'point'), 4026.67220709878, rtol=1e-5) - assert np.isclose(get_result(results, 'pressure', 1, 0, -1, 'point'), 4026.67220709878, rtol=1e-5) - assert np.isclose(get_result(results, 'pressure', 2, 0, -1, 'point'), 4026.67220709878, rtol=1e-5) - assert np.isclose(get_result(results, 'pressure', 1, -1, -1, 'point'), 4000.00, rtol=1e-5) - assert np.isclose(get_result(results, 'pressure', 2, -1, -1, 'point'), 4000.00, rtol=1e-5) - assert np.isclose(get_result(results, 'flow', 1, -1, -1, 'point'), 3.9925, rtol=1e-6) - assert np.isclose(get_result(results, 'flow', 2, -1, -1, 'point'), 3.9925, rtol=1e-6) - - -def test_bifurcation_R(tmpdir, exePath): - results = run_test_case_by_name('bifurcation_R', tmpdir, exePath) - assert np.isclose(get_result(results, 'pressure', 0, 0, -1, 'point'), 3997.46433118937, rtol=1e-5) - assert np.isclose(get_result(results, 'pressure', 0, -1, -1, 'point'), 3984.67700709878, rtol=1e-5) - assert np.isclose(get_result(results, 'pressure', 1, 0, -1, 'point'), 3984.67700709878, rtol=1e-5) - assert np.isclose(get_result(results, 'pressure', 2, 0, -1, 'point'), 3984.67700709878, rtol=1e-5) - assert np.isclose(get_result(results, 'pressure', 1, -1, -1, 'point'), 3958.0048, rtol=1e-5) - assert np.isclose(get_result(results, 'pressure', 2, -1, -1, 'point'), 3958.0048, rtol=1e-5) - assert np.isclose(get_result(results, 'flow', 1, -1, -1, 'point'), 3.9925, rtol=1e-5) - assert np.isclose(get_result(results, 'flow', 2, -1, -1, 'point'), 3.9925, rtol=1e-5) - - -def test_bifurcation_R_stab(tmpdir, exePath): - results = run_test_case_by_name('bifurcation_R_stab', tmpdir, exePath) - assert np.isclose(get_result(results, 'pressure', 0, 0, -1, 'point'), 3997.46433118937, rtol=1e-6) - assert np.isclose(get_result(results, 'pressure', 0, -1, -1, 'point'), 3984.67700709878, rtol=1e-6) - assert np.isclose(get_result(results, 'pressure', 1, 0, -1, 'point'), 3984.67700709878, rtol=1e-6) - assert np.isclose(get_result(results, 'pressure', 2, 0, -1, 'point'), 3984.67700709878, rtol=1e-6) - assert np.isclose(get_result(results, 'pressure', 1, -1, -1, 'point'), 3958.0048, rtol=1e-7) - assert np.isclose(get_result(results, 'pressure', 2, -1, -1, 'point'), 3958.0048, rtol=1e-7) - assert np.isclose(get_result(results, 'flow', 1, -1, -1, 'point'), 3.9925, rtol=1e-6) - assert np.isclose(get_result(results, 'flow', 2, -1, -1, 'point'), 3.9925, rtol=1e-6) - - -def test_bifurcation_RCR(tmpdir, exePath): - results = run_test_case_by_name('bifurcation_RCR', tmpdir, exePath) - assert np.isclose(get_result(results, 'pressure', 0, 
0, np.arange(100, 200), 'mean'), 123878.022943, rtol=1e-7) - assert np.isclose(get_result(results, 'pressure', 0, 0, np.arange(100, 200), 'max'), 168182.372624, rtol=1e-7) - assert np.isclose(get_result(results, 'pressure', 0, 0, np.arange(100, 200), 'min'), 89237.6441223, rtol=1e-7) - assert np.isclose(get_result(results, 'pressure', 1, -1, np.arange(100, 200), 'mean'), 123855.677783, rtol=1e-7) - assert np.isclose(get_result(results, 'pressure', 1, -1, np.arange(100, 200), 'max'), 171598.373528, rtol=1e-7) - assert np.isclose(get_result(results, 'pressure', 1, -1, np.arange(100, 200), 'min'), 87624.0897929, rtol=1e-7) - assert np.isclose(get_result(results, 'pressure', 2, -1, np.arange(100, 200), 'mean'), 123855.677783, rtol=1e-7) - assert np.isclose(get_result(results, 'pressure', 2, -1, np.arange(100, 200), 'max'), 171598.373528, rtol=1e-7) - assert np.isclose(get_result(results, 'pressure', 2, -1, np.arange(100, 200), 'min'), 87624.0897929, rtol=1e-7) - assert np.isclose(get_result(results, 'flow', 0, 0, np.arange(100, 200), 'mean'), 7.557147487534, rtol=1e-7) - assert np.isclose(get_result(results, 'flow', 1, -1, np.arange(100, 200), 'mean'), 3.839024865141, rtol=1e-7) - assert np.isclose(get_result(results, 'flow', 1, -1, np.arange(100, 200), 'max'), 24.0553490482, rtol=1e-7) - assert np.isclose(get_result(results, 'flow', 1, -1, np.arange(100, 200), 'min'), -3.35029015773, rtol=1e-7) - - -def test_bifurcation_RCR_staticFunc(tmpdir, exePath): - results = run_test_case_by_name('bifurcation_RCR_staticFunc', tmpdir, exePath) - assert np.isclose(get_result(results, 'pressure', 0, 0, -1, 'point'), 3997.46433118937, rtol=1e-6,) - assert np.isclose(get_result(results, 'pressure', 0, -1, -1, 'point'), 3984.67700709878, rtol=1e-6,) - assert np.isclose(get_result(results, 'pressure', 1, 0, -1, 'point'), 3984.67700709878, rtol=1e-6,) - assert np.isclose(get_result(results, 'pressure', 2, 0, -1, 'point'), 3984.67700709878, rtol=1e-6,) - assert np.isclose(get_result(results, 'pressure', 1, -1, -1, 'point'), 3958.0048, rtol=1e-7) - assert np.isclose(get_result(results, 'pressure', 2, -1, -1, 'point'), 3958.0048, rtol=1e-7) - assert np.isclose(get_result(results, 'flow', 1, -1, -1, 'point'), 3.9925, rtol=1e-6,) - assert np.isclose(get_result(results, 'flow', 2, -1, -1, 'point'), 3.9925, rtol=1e-6,) +# Each element in the test cases list is a tuple structured as follows: +# +# (, ) +# - : A string representing the name of the test case +# - : A list of tuples, each containing: +# (, , ,