Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 4 additions & 3 deletions .github/workflows/test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -15,11 +15,12 @@ jobs:
make
- name: install dependencies
run: |
pip install pytest pytest-cov pytest-mock
pip install pytest pytest-cov pytest-mock pytest-xdist
pip install numpy
- name: result tests
- name: result simulation system tests
run: |
pytest Tests/SystemTests
# We're providing the path to the EXE based on the prior build solver step.
pytest Tests/SystemTests -n auto --relativeExePath="build_skyline/bin/OneDSolver"
- name: result unit tests
run: |
cd build_skyline
Expand Down
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -41,3 +41,6 @@ build

# Vscode
.vscode/

# Python
venv/
30 changes: 12 additions & 18 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@

CMAKE_MINIMUM_REQUIRED(VERSION 3.11)
CMAKE_MINIMUM_REQUIRED(VERSION 3.18)

# COMMON SETTINGS
PROJECT(OneDSolver)
Expand Down Expand Up @@ -71,6 +71,17 @@ ELSEIF(sparseSolverType STREQUAL "csparse")
"${CMAKE_CURRENT_SOURCE_DIR}/Code/Source/sparse/csparse/*.h")
ENDIF()

# NLOHMANN JSON FOR SERIALIZATION
include(FetchContent)
FetchContent_Declare(
nlohmann_json
GIT_REPOSITORY https://github.com/nlohmann/json.git
GIT_TAG v3.11.2 # You can specify a version or use the latest tag
)
FetchContent_MakeAvailable(nlohmann_json)
# Global include for nlohmann headers
include_directories(${nlohmann_json_SOURCE_DIR}/single_include)

# COPY DATASET FOLDER
# FILE(COPY "${CMAKE_CURRENT_SOURCE_DIR}/tests" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}")

Expand Down Expand Up @@ -111,16 +122,6 @@ if (BUILD_SV_INSTALLER)
add_subdirectory("${CMAKE_SOURCE_DIR}/Distribution")
ENDIF()

# NLOHMANN JSON FOR SERIALIZATION
include(FetchContent)
FetchContent_Declare(
nlohmann_json
GIT_REPOSITORY https://github.com/nlohmann/json.git
GIT_TAG v3.11.2 # You can specify a version or use the latest tag
)
FetchContent_MakeAvailable(nlohmann_json)
target_include_directories(${PROJECT_NAME} PUBLIC ${nlohmann_json_SOURCE_DIR}/single_include)

# Unit tests and Google Test
if(ENABLE_UNIT_TEST)

Expand Down Expand Up @@ -174,13 +175,6 @@ if(ENABLE_UNIT_TEST)
# handle both use cases.
target_link_libraries(UnitTestsExecutable gtest gtest_main pthread)

# We need to include Nlohmann when compiling the unit test executable
# A better strategy would be to build up a list of what we're going to
# include and reuse it.
# Another option: we could simply build the library of cvOneD functionality
# and link against that both here and in the main executable.
target_include_directories(UnitTestsExecutable PRIVATE ${nlohmann_json_SOURCE_DIR}/single_include)

# It's likely we'll need to include additional directories for some
# versions of the unit tests. That can be updated when/if we update
# the general strategy.
Expand Down
2 changes: 1 addition & 1 deletion Code/Source/cvOneDModelManager.cxx
Original file line number Diff line number Diff line change
Expand Up @@ -188,7 +188,7 @@ int cvOneDModelManager::CreateNode(char * nodeName,double x,double y,double z){
}


int cvOneDModelManager::CreateJoint(char * jointName,double x,double y,double z,
int cvOneDModelManager::CreateJoint(const char * jointName,double x,double y,double z,
int numInSegs,int numOutSegs,
int *InSegs,int *OutSegs){

Expand Down
2 changes: 1 addition & 1 deletion Code/Source/cvOneDModelManager.h
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ class cvOneDModelManager{


// CREATE JOINT
int CreateJoint(char* jointName,double x,double y,double z,
int CreateJoint(const char* jointName,double x,double y,double z,
int numInSegs,int numOutSegs,
int *InSegs,int *OutSegs);

Expand Down
19 changes: 9 additions & 10 deletions Code/Source/cvOneDOptions.h
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@
#include <string>
#include <math.h>
#include <memory>
#include <optional>
#include "cvOneDTypes.h"
#include "cvOneDException.h"

Expand All @@ -49,21 +50,14 @@ struct options{

// NODE DATA
cvStringVec nodeName;
// Note that these coordinates are also used
// for the joints through an implicit mapping
cvDoubleVec nodeXcoord;
cvDoubleVec nodeYcoord;
cvDoubleVec nodeZcoord;

// JOINT DATA
cvStringVec jointName;

// "jointNode" isn't used currently -- but we want
// to start integrating it. It can be used to make
// the mapping from joint->node explicit. Right
// now, the mapping is implicit: each joint is
// associated with the node of the same index in
// the list of nodes.
// jointNode: name of the associated node that we will use
// to get the XYZ coordinates when creating the joint.
cvStringVec jointNode;

cvStringVec jointInletName;
Expand Down Expand Up @@ -121,7 +115,12 @@ struct options{
double convergenceTolerance;
int useIV;
int useStab;
string outputType;

// These are to preserve legacy behavior and are
// expected to be eventually migrated into a
// post-processing step.
string outputType = "TEXT";
std::optional<int> vtkOutputType = std::nullopt;
};

void validateOptions(options const& opts);
Expand Down
9 changes: 7 additions & 2 deletions Code/Source/cvOneDOptionsJsonParser.cxx
Original file line number Diff line number Diff line change
Expand Up @@ -206,8 +206,13 @@ void parseSolverOptions(const nlohmann::ordered_json& jsonData, options& opts) t
opts.useIV = solverOptions.at("useIV").get<int>();
opts.useStab = solverOptions.at("useStab").get<int>();

// Doesn't seem like this is used...but we'll provide a default anyway
opts.outputType = solverOptions.value("outputType", "None");
// Until this is migrated elsewhere, we'll (optionally) store the output options.
if(solverOptions.contains("outputType")){
opts.outputType = solverOptions.at("outputType").get<std::string>();
}
if(solverOptions.contains("vtkOutputType")){
opts.vtkOutputType = solverOptions.at("vtkOutputType").get<int>();
}

} catch (const std::exception& e) {
throw std::runtime_error("Error parsing 'solverOptions': " + std::string(e.what()));
Expand Down
23 changes: 11 additions & 12 deletions Code/Source/cvOneDOptionsJsonSerializer.cxx
Original file line number Diff line number Diff line change
Expand Up @@ -81,29 +81,22 @@ nlohmann::ordered_json serialize_joint_data(const options& opts) {
nlohmann::ordered_json j = nlohmann::ordered_json::array();

// Check consistency of the joint inlet and outlet vectors
check_consistent_size("joints", opts.jointName,
check_consistent_size("joints", opts.jointName, opts.jointNode,
opts.jointInletListNames, opts.jointInletListNumber,
opts.jointOutletListNames, opts.jointOutletListNumber,
opts.jointInletList, opts.jointOutletList);

size_t jointCount = opts.jointName.size();

// There must be enough nodes for us to make associations. This is
// an implicit assumption made in the current joint creation code.
if( opts.nodeName.size() < jointCount ){
throw std::runtime_error("The implicit joint-node association requires that there be at least as many nodes as there are joints.");
}

for (size_t i = 0; i < jointCount; ++i) {
nlohmann::ordered_json joint;

// Add JOINT data
// We're adding the node based on the index so we
// can start incorporating the explicit association
// from the implicit one currently used in the code.
// Now that we're actually populating the joint node associations
// when deserializing the legacy format, we can utilize them here.
joint["joint"] = {
{"id", "J" + std::to_string(i + 1)},
{"associatedNode", opts.nodeName.at(i)},
{"id", opts.jointName.at(i)},
{"associatedNode", opts.jointNode.at(i)},
{"inletName", opts.jointInletListNames.at(i)},
{"outletName", opts.jointOutletListNames.at(i)}
};
Expand Down Expand Up @@ -225,7 +218,13 @@ nlohmann::ordered_json serializeSolverOptions(options const& opts) {
solverOptions["convergenceTolerance"] = opts.convergenceTolerance;
solverOptions["useIV"] = opts.useIV;
solverOptions["useStab"] = opts.useStab;

// For now, we're serializing the output data.
// In the future, we'll want to migrate these.
solverOptions["outputType"] = opts.outputType;
if(opts.vtkOutputType){
solverOptions["vtkOutputType"] = *opts.vtkOutputType;
}

return solverOptions;
}
Expand Down
50 changes: 33 additions & 17 deletions Code/Source/cvOneDOptionsLegacySerializer.cxx
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,6 @@
#include <string.h>

#include "cvOneDUtility.h"
#include "cvOneDGlobal.h"

namespace cvOneD{

Expand Down Expand Up @@ -126,10 +125,25 @@ void readOptionsLegacyFormat(string inputFile, options* opts){
try{
// Get Joint Name
opts->jointName.push_back(tokenizedString[1]);
// opts->jointNode.push_back(atof(tokenizedString[2].c_str()));
opts->jointNode.push_back(tokenizedString[2]);

// The legacy input files included a "joint node", but it is
// never utilized in simulation. The legacy behavior was, instead,
// to simply associate each joint with the sequentially listed
// nodeXcoord, nodeYcoord, and nodeZcoord.
//
// To preserve this legacy behavior, we're not going to deserialize
// the jointNode. Instead, we're going to replace "jointNode" with
// the sequential node item name. That's **wrong** -- we should be
// using "jointNode"!
// But! it preserves the legacy behavior of the existing legacy
// options files when we refactor the actual utilization of the
// options to use the jointNode.
//
// The actual population of the jointNode option is done in a
// post-processing step after we finish processing the file.
opts->jointInletName.push_back(tokenizedString[3]);
opts->jointOutletName.push_back(tokenizedString[4]);

}catch(...){
throw cvException(string("ERROR: Invalid JOINT Format. Line " + to_string(lineCount) + "\n").c_str());
}
Expand Down Expand Up @@ -258,22 +272,13 @@ void readOptionsLegacyFormat(string inputFile, options* opts){
}else if(tokenizedString.size() < 2){
throw cvException(string("ERROR: Not enough parameters for OUTPUT token. Line " + to_string(lineCount) + "\n").c_str());
}
// Output Type
if(upper_string(tokenizedString[1]) == "TEXT"){
cvOneDGlobal::outputType = OutputTypeScope::OUTPUT_TEXT;
}else if(upper_string(tokenizedString[1]) == "VTK"){
cvOneDGlobal::outputType = OutputTypeScope::OUTPUT_VTK;
}else if(upper_string(tokenizedString[1]) == "BOTH"){
cvOneDGlobal::outputType = OutputTypeScope::OUTPUT_BOTH;
}else{
throw cvException("ERROR: Invalid OUTPUT Type.\n");
}

// Collect and store the output type data
opts->outputType = tokenizedString[1];
if(tokenizedString.size() > 2){
cvOneDGlobal::vtkOutputType = atoi(tokenizedString[2].c_str());
if(cvOneDGlobal::vtkOutputType > 1){
throw cvException("ERROR: Invalid OUTPUT VTK Type.\n");
}
opts->vtkOutputType = atoi(tokenizedString[2].c_str());
}

}else if(upper_string(tokenizedString[0]) == std::string("DATATABLE")){
// printf("Found Data Table.\n");
try{
Expand Down Expand Up @@ -361,6 +366,17 @@ void readOptionsLegacyFormat(string inputFile, options* opts){
// Increment Line Count
lineCount++;
}

// Post-process: to preserve legacy behavior, we're
// making each joint node the corresponding sequential
// node from the node array. See comment above for additional details.
if(opts->jointName.size() > opts->nodeName.size()){
throw cvException("ERROR: there must be at least as many nodes as there are joints.\n");
}
for(size_t k = 0; k < opts->jointName.size(); ++k){
opts->jointNode.push_back(opts->nodeName.at(k));
}

// Close File
infile.close();
}
Expand Down
Loading