diff --git a/CHANGELOG.md b/CHANGELOG.md index 134ee952f9..04050e6007 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,9 +5,12 @@ - Dropped support for parsing Tensorflow network format. Newest Marabou version that supports Tensorflow is at commit 190555573e4702. - Fixed bug in the parsing of `transpose` nodes in command line C++ parser. - Implemented forward-backward abstract interpretation, symbolic bound tightening, interval arithmetic and simulations for all activation functions. + - Implemented backward analysis using preimage-approximation algorithm for `Relu`, `LeakyRelu`, `Sign` and `Bilinear` Layers. - Added the BaBSR heuristic as a new branching strategy for ReLU Splitting - Support Sub of two variables, "Mul" of two constants, Slice, and ConstantOfShape in the python onnx parser - Renamed SmtCore module to SearchTreeHandler + - Implemented backward analysis using INVPROP algorithm with added support for all activation functions. + - Implemented backward analysis using partial multi-neuron relaxation with BBPS-based heuristic for neuron selection. ## Version 2.0.0 diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp index 87bcfcbbd4..302c171d54 100644 --- a/src/configuration/GlobalConfiguration.cpp +++ b/src/configuration/GlobalConfiguration.cpp @@ -68,6 +68,24 @@ const double GlobalConfiguration::COST_FUNCTION_ERROR_THRESHOLD = 0.0000000001; const unsigned GlobalConfiguration::SIMULATION_RANDOM_SEED = 1; +const unsigned GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED = 1; +const unsigned GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS = 25000; +const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED = 1; +const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS = 25; +const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE = 0.025; +const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE = 0.25; +const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_WEIGHT_DECAY = 0; + +const unsigned GlobalConfiguration::INVPROP_MAX_ITERATIONS = 10; +const double GlobalConfiguration::INVPROP_STEP_SIZE = 0.025; +const double GlobalConfiguration::INVPROP_LEARNING_RATE = 0.005; +const double GlobalConfiguration::INVPROP_WEIGHT_DECAY = 0.5; +const double GlobalConfiguration::INVPROP_INITIAL_GAMMA = 0.025; + +const unsigned GlobalConfiguration::PMNR_RANDOM_SEED = 1; +const unsigned GlobalConfiguration::PMNR_SELECTED_NEURONS = 2; +const unsigned GlobalConfiguration::PMNR_BBPS_BRANCHING_CANDIDATES = 100; + const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true; const double GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT = 0.00000000001; @@ -118,6 +136,7 @@ const bool GlobalConfiguration::WRITE_JSON_PROOF = false; const unsigned GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH = 3; const unsigned GlobalConfiguration::MAX_ROUNDS_OF_BACKWARD_ANALYSIS = 10; +const unsigned GlobalConfiguration::MAX_ROUNDS_OF_PMNR_BACKWARD_ANALYSIS = 10; const bool GlobalConfiguration::ANALYZE_PROOF_DEPENDENCIES = true; const bool GlobalConfiguration::MINIMIZE_PROOF_DEPENDENCIES = true; diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h index 71f2fd3b2d..8cf91a02b6 100644 --- a/src/configuration/GlobalConfiguration.h +++ b/src/configuration/GlobalConfiguration.h @@ -151,6 +151,51 @@ class GlobalConfiguration // Random seed for generating simulation values. 
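// Illustrative sketch only (not part of the patch): the optimization constants added
// above (max iterations, step size, learning rate, weight decay) are the usual knobs of
// a projected-gradient search over per-neuron relaxation parameters constrained to
// [0, 1]. A minimal loop of that shape, using hypothetical helper names, might look
// roughly like this:
//
//     Vector<double> alphas( numParameters, 0.5 );
//     for ( unsigned i = 0;
//           i < GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS;
//           ++i )
//     {
//         Vector<double> grad = estimateGradient( alphas ); // hypothetical helper
//         for ( unsigned j = 0; j < alphas.size(); ++j )
//         {
//             alphas[j] +=
//                 GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE *
//                     grad[j] -
//                 GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_WEIGHT_DECAY *
//                     alphas[j];
//             // Project back into the valid parameter range [0, 1].
//             alphas[j] = std::min( 1.0, std::max( 0.0, alphas[j] ) );
//         }
//     }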
static const unsigned SIMULATION_RANDOM_SEED; + // Random seed for EstimateVolume procedure (PreimageApproximation). + static const unsigned VOLUME_ESTIMATION_RANDOM_SEED; + + // Number of iterations for EstimateVolume procedure (PreimageApproximation). + static const unsigned VOLUME_ESTIMATION_ITERATIONS; + + // Random seed for PreimageApproximation optimization. + static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED; + + // Maximum iterations for PreimageApproximation optimization. + static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; + + // Step size for PreimageApproximation optimization. + static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; + + // Learning rate for PreimageApproximation optimization. + static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; + + // Weight decay for PreimageApproximation optimization. + static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_WEIGHT_DECAY; + + // Maximum iterations for INVPROP optimization. + static const unsigned INVPROP_MAX_ITERATIONS; + + // Step size for INVPROP optimization. + static const double INVPROP_STEP_SIZE; + + // Learning rate for INVPROP optimization. + static const double INVPROP_LEARNING_RATE; + + // Weight decay for INVPROP optimization. + static const double INVPROP_WEIGHT_DECAY; + + // Initial gamma values for INVPROP optimization. + static const double INVPROP_INITIAL_GAMMA; + + // Random seed for PMNR (with randomized hyperplanes). + static const unsigned PMNR_RANDOM_SEED; + + // Number of selected neurons for PMNR (with heuristically selected hyperplanes). + static const unsigned PMNR_SELECTED_NEURONS; + + // Number of candidates for PMNR-BBPS branching points. + static const unsigned PMNR_BBPS_BRANCHING_CANDIDATES; + // How often should projected steepest edge reset the reference space? static const unsigned PSE_ITERATIONS_BEFORE_RESET; @@ -263,6 +308,10 @@ class GlobalConfiguration */ static const unsigned MAX_ROUNDS_OF_BACKWARD_ANALYSIS; + /* How many rounds of backward analysis to perform for PMNR algorithm? + */ + static const unsigned MAX_ROUNDS_OF_PMNR_BACKWARD_ANALYSIS; + /* Analyze lemma dependencies when producing proofs */ static const bool ANALYZE_PROOF_DEPENDENCIES; @@ -271,7 +320,6 @@ class GlobalConfiguration */ static const bool MINIMIZE_PROOF_DEPENDENCIES; - #ifdef ENABLE_GUROBI /* The number of threads Gurobi spawns diff --git a/src/configuration/OptionParser.cpp b/src/configuration/OptionParser.cpp index 6864e9fc76..73efed041b 100644 --- a/src/configuration/OptionParser.cpp +++ b/src/configuration/OptionParser.cpp @@ -267,7 +267,8 @@ void OptionParser::initialize() &( ( *_stringOptions )[Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE] ) ) ->default_value( ( *_stringOptions )[Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE] ), "The MILP solver bound tightening type: " - "lp/backward-once/backward-converge/lp-inc/milp/milp-inc/iter-prop/none." ) + "lp/backward-once/backward-converge/backward-preimage-approx/backward-pmnr/lp-inc/milp/" + "milp-inc/iter-prop/none." 
) #endif ; diff --git a/src/configuration/Options.cpp b/src/configuration/Options.cpp index 657ddb6b7c..d9fa7b0b3b 100644 --- a/src/configuration/Options.cpp +++ b/src/configuration/Options.cpp @@ -209,6 +209,10 @@ MILPSolverBoundTighteningType Options::getMILPSolverBoundTighteningType() const return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE; if ( strategyString == "backward-converge" ) return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE; + if ( strategyString == "backward-preimage-approx" ) + return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX; + if ( strategyString == "backward-pmnr" ) + return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR; else if ( strategyString == "milp" ) return MILPSolverBoundTighteningType::MILP_ENCODING; else if ( strategyString == "milp-inc" ) diff --git a/src/engine/Engine.cpp b/src/engine/Engine.cpp index 0ae405211a..b15e636fc1 100644 --- a/src/engine/Engine.cpp +++ b/src/engine/Engine.cpp @@ -1598,6 +1598,8 @@ void Engine::performMILPSolverBoundedTightening( Query *inputQuery ) case MILPSolverBoundTighteningType::LP_RELAXATION_INCREMENTAL: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR: _networkLevelReasoner->lpRelaxationPropagation(); break; case MILPSolverBoundTighteningType::MILP_ENCODING: @@ -1662,6 +1664,31 @@ void Engine::performAdditionalBackwardAnalysisIfNeeded() printf( "Backward analysis tightened %u bounds\n", tightened ); } } + + if ( _milpSolverBoundTighteningType == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX ) + { + performMILPSolverBoundedTightening( &( *_preprocessedQuery ) ); + unsigned tightened = performSymbolicBoundTightening( &( *_preprocessedQuery ) ); + if ( _verbosity > 0 ) + printf( "Backward analysis tightened %u bounds\n", tightened ); + } + + if ( _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR ) + { + unsigned iter = 1; + unsigned tightened = performSymbolicBoundTightening( &( *_preprocessedQuery ) ); + if ( _verbosity > 0 ) + printf( "Backward analysis tightened %u bounds\n", tightened ); + while ( tightened && iter < GlobalConfiguration::MAX_ROUNDS_OF_PMNR_BACKWARD_ANALYSIS ) + { + performMILPSolverBoundedTightening( &( *_preprocessedQuery ) ); + tightened = performSymbolicBoundTightening( &( *_preprocessedQuery ) ); + if ( _verbosity > 0 ) + printf( "Backward analysis tightened %u bounds\n", tightened ); + ++iter; + } + } } void Engine::performMILPSolverBoundedTighteningForSingleLayer( unsigned targetIndex ) @@ -1689,6 +1716,8 @@ void Engine::performMILPSolverBoundedTighteningForSingleLayer( unsigned targetIn return; case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR: case MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION: case MILPSolverBoundTighteningType::NONE: return; diff --git a/src/engine/MILPEncoder.cpp b/src/engine/MILPEncoder.cpp index 2041ff57b1..2e4bf59947 100644 --- a/src/engine/MILPEncoder.cpp +++ b/src/engine/MILPEncoder.cpp @@ -16,9 +16,9 @@ #include "MILPEncoder.h" -#include "DeepPolySoftmaxElement.h" #include "FloatUtils.h" #include "GurobiWrapper.h" +#include "Layer.h" #include 
"TimeUtils.h" MILPEncoder::MILPEncoder( const ITableau &tableau ) @@ -629,14 +629,14 @@ void MILPEncoder::encodeSoftmaxConstraint( GurobiWrapper &gurobi, SoftmaxConstra } if ( !useLSE2 ) { - symbolicLowerBias = NLR::DeepPolySoftmaxElement::LSELowerBound( - sourceMids, sourceLbs, sourceUbs, i ); + symbolicLowerBias = + NLR::Layer::LSELowerBound( sourceMids, sourceLbs, sourceUbs, i ); if ( !FloatUtils::wellFormed( symbolicLowerBias ) ) wellFormed = false; for ( unsigned j = 0; j < size; ++j ) { - double dldj = NLR::DeepPolySoftmaxElement::dLSELowerBound( - sourceMids, sourceLbs, sourceUbs, i, j ); + double dldj = + NLR::Layer::dLSELowerBound( sourceMids, sourceLbs, sourceUbs, i, j ); if ( !FloatUtils::wellFormed( dldj ) ) wellFormed = false; terms.append( @@ -646,14 +646,14 @@ void MILPEncoder::encodeSoftmaxConstraint( GurobiWrapper &gurobi, SoftmaxConstra } else { - symbolicLowerBias = NLR::DeepPolySoftmaxElement::LSELowerBound2( - sourceMids, sourceLbs, sourceUbs, i ); + symbolicLowerBias = + NLR::Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, i ); if ( !FloatUtils::wellFormed( symbolicLowerBias ) ) wellFormed = false; for ( unsigned j = 0; j < size; ++j ) { - double dldj = NLR::DeepPolySoftmaxElement::dLSELowerBound2( - sourceMids, sourceLbs, sourceUbs, i, j ); + double dldj = + NLR::Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, i, j ); if ( !FloatUtils::wellFormed( dldj ) ) wellFormed = false; terms.append( @@ -667,15 +667,14 @@ void MILPEncoder::encodeSoftmaxConstraint( GurobiWrapper &gurobi, SoftmaxConstra // Upper-bound wellFormed = true; double symbolicUpperBias = - NLR::DeepPolySoftmaxElement::LSEUpperBound( sourceMids, targetLbs, targetUbs, i ); + NLR::Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, i ); if ( !FloatUtils::wellFormed( symbolicUpperBias ) ) wellFormed = false; terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariables[i] ) ) ); for ( unsigned j = 0; j < size; ++j ) { - double dudj = NLR::DeepPolySoftmaxElement::dLSEUpperbound( - sourceMids, targetLbs, targetUbs, i, j ); + double dudj = NLR::Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, i, j ); if ( !FloatUtils::wellFormed( dudj ) ) wellFormed = false; terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariables[j] ) ) ); diff --git a/src/engine/MILPSolverBoundTighteningType.h b/src/engine/MILPSolverBoundTighteningType.h index f317639375..42be276767 100644 --- a/src/engine/MILPSolverBoundTighteningType.h +++ b/src/engine/MILPSolverBoundTighteningType.h @@ -34,8 +34,14 @@ enum class MILPSolverBoundTighteningType { // Perform backward analysis BACKWARD_ANALYSIS_ONCE = 5, BACKWARD_ANALYSIS_CONVERGE = 6, + // Perform backward analysis using the PreimageApproximation Algorithm (arXiv:2305.03686v4 + // [cs.SE]) + BACKWARD_ANALYSIS_PREIMAGE_APPROX = 7, + // Perform backward analysis using PMNR with INVPROP and BBPS-based neuron selection + // (arXiv:2302.01404v4 [cs.LG], arXiv:2405.21063v3 [cs.LG]). + BACKWARD_ANALYSIS_PMNR = 8, // Option to have no MILP bound tightening performed - NONE = 10, + NONE = 9, }; #endif // __MILPSolverBoundTighteningType_h__ diff --git a/src/engine/PolygonalTightening.h b/src/engine/PolygonalTightening.h new file mode 100644 index 0000000000..2eddeb176f --- /dev/null +++ b/src/engine/PolygonalTightening.h @@ -0,0 +1,124 @@ +/********************* */ +/*! 
\file PolygonalTightening.h + ** \verbatim + ** Top contributors (to current version): + ** Duligur Ibeling, Guy Katz, Ido Shmuel + ** This file is part of the Marabou project. + ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS + ** in the top-level source directory) and their institutional affiliations. + ** All rights reserved. See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** [[ Add lengthier description here ]] + **/ + +#ifndef __PolygonalTightening_h__ +#define __PolygonalTightening_h__ + +#include "FloatUtils.h" +#include "MStringf.h" +#include "Map.h" +#include "NeuronIndex.h" + +#include + +class PolygonalTightening +{ +public: + enum PolygonalBoundType { + LB = 0, + UB = 1, + }; + + PolygonalTightening( Map neuronToCoefficient, + double value, + PolygonalBoundType type ) + : _neuronToCoefficient( neuronToCoefficient ) + , _value( value ) + , _type( type ) + { + } + + /* + The coefficient of each neuron. + */ + Map _neuronToCoefficient; + + /* + Its new value. + */ + double _value; + + /* + Whether the tightening tightens the + lower bound or the upper bound. + */ + PolygonalBoundType _type; + + /* + Equality operator. + */ + bool operator==( const PolygonalTightening &other ) const + { + bool allFound = true; + for ( const auto &pair : _neuronToCoefficient ) + { + bool currentFound = false; + for ( const auto &otherPair : other._neuronToCoefficient ) + { + currentFound |= ( pair.first._layer == otherPair.first._layer && + pair.first._neuron == otherPair.first._neuron && + pair.second == otherPair.second ); + } + allFound &= currentFound; + } + bool result = allFound && _value == other._value && _type == other._type && + _neuronToCoefficient.size() == other._neuronToCoefficient.size(); + return result; + } + + /* + Get matching coefficent for a NeuronIndex, + return 0 if the NeuronIndex is not present. + */ + double getCoeff( NLR::NeuronIndex index ) const + { + if ( _neuronToCoefficient.exists( index ) ) + return _neuronToCoefficient[index]; + return 0; + } + + void dump() const + { + String output = "PolygonalTightening: "; + unsigned count = 0; + for ( const auto &pair : _neuronToCoefficient ) + { + double coeff = pair.second; + if ( FloatUtils::isZero( coeff ) ) + continue; + + if ( count ) + { + output += Stringf( "%s %.2lf neuron%u_%u ", + FloatUtils::isPositive( coeff ) ? "+" : "-", + FloatUtils::abs( coeff ), + pair.first._layer, + pair.first._neuron ); + } + else + { + output += + Stringf( "%.2lf neuron%u_%u ", coeff, pair.first._layer, pair.first._neuron ); + } + ++count; + } + if ( count == 0 ) + { + output += Stringf( "%.2lf ", 0 ); + } + output += Stringf( "%s %.2lf", _type == LB ? 
">=" : "<=", _value ); + printf( "%s\n", output.ascii() ); + } +}; +#endif // __PolygonalTightening_h diff --git a/src/nlr/CMakeLists.txt b/src/nlr/CMakeLists.txt index e377a638ba..89d353aa26 100644 --- a/src/nlr/CMakeLists.txt +++ b/src/nlr/CMakeLists.txt @@ -21,6 +21,8 @@ network_level_reasoner_add_unit_test(ParallelSolver) if (${ENABLE_GUROBI}) network_level_reasoner_add_unit_test(LPRelaxation) + network_level_reasoner_add_unit_test(PMNRSelection) + network_level_reasoner_add_unit_test(PMNR) endif() if (${BUILD_PYTHON}) diff --git a/src/nlr/DeepPolyAbsoluteValueElement.cpp b/src/nlr/DeepPolyAbsoluteValueElement.cpp index 75d7d13461..f2b7274114 100644 --- a/src/nlr/DeepPolyAbsoluteValueElement.cpp +++ b/src/nlr/DeepPolyAbsoluteValueElement.cpp @@ -94,9 +94,26 @@ void DeepPolyAbsoluteValueElement::execute( _symbolicUpperBias[i] ) ); log( Stringf( "Neuron%u LB: %f, UB: %f", i, _lb[i], _ub[i] ) ); } + + if ( _storePredecessorSymbolicBounds ) + { + storePredecessorSymbolicBounds(); + } + log( "Executing - done" ); } +void DeepPolyAbsoluteValueElement::storePredecessorSymbolicBounds() +{ + for ( unsigned i = 0; i < _size; ++i ) + { + ( *_predecessorSymbolicLb )[_layerIndex][i] = _symbolicLb[i]; + ( *_predecessorSymbolicUb )[_layerIndex][i] = _symbolicUb[i]; + ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = _symbolicLowerBias[i]; + ( *_predecessorSymbolicUpperBias )[_layerIndex][i] = _symbolicUpperBias[i]; + } +} + void DeepPolyAbsoluteValueElement::symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, diff --git a/src/nlr/DeepPolyAbsoluteValueElement.h b/src/nlr/DeepPolyAbsoluteValueElement.h index 732320220e..2d97086ae9 100644 --- a/src/nlr/DeepPolyAbsoluteValueElement.h +++ b/src/nlr/DeepPolyAbsoluteValueElement.h @@ -33,6 +33,8 @@ class DeepPolyAbsoluteValueElement : public DeepPolyElement void execute( const Map &deepPolyElementsBefore ); + void storePredecessorSymbolicBounds(); + void symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, double *symbolicLowerBias, diff --git a/src/nlr/DeepPolyAnalysis.cpp b/src/nlr/DeepPolyAnalysis.cpp index 6a7ddb09db..0ea2cd67d5 100644 --- a/src/nlr/DeepPolyAnalysis.cpp +++ b/src/nlr/DeepPolyAnalysis.cpp @@ -39,14 +39,38 @@ namespace NLR { -DeepPolyAnalysis::DeepPolyAnalysis( LayerOwner *layerOwner ) +DeepPolyAnalysis::DeepPolyAnalysis( LayerOwner *layerOwner, + bool storeOutputSymbolicBounds, + bool storePredecessorSymbolicBounds, + bool useParameterisedSBT, + Map> *layerIndicesToParameters, + Map> *outputSymbolicLb, + Map> *outputSymbolicUb, + Map> *outputSymbolicLowerBias, + Map> *outputSymbolicUpperBias, + Map> *predecessorSymbolicLb, + Map> *predecessorSymbolicUb, + Map> *predecessorSymbolicLowerBias, + Map> *predecessorSymbolicUpperBias ) : _layerOwner( layerOwner ) + , _storeOutputSymbolicBounds( storeOutputSymbolicBounds ) + , _storePredecessorSymbolicBounds( storePredecessorSymbolicBounds ) + , _useParameterisedSBT( useParameterisedSBT ) + , _layerIndicesToParameters( layerIndicesToParameters ) , _work1SymbolicLb( NULL ) , _work1SymbolicUb( NULL ) , _work2SymbolicLb( NULL ) , _work2SymbolicUb( NULL ) , _workSymbolicLowerBias( NULL ) , _workSymbolicUpperBias( NULL ) + , _outputSymbolicLb( outputSymbolicLb ) + , _outputSymbolicUb( outputSymbolicUb ) + , _outputSymbolicLowerBias( outputSymbolicLowerBias ) + , _outputSymbolicUpperBias( outputSymbolicUpperBias ) + , _predecessorSymbolicLb( predecessorSymbolicLb ) + , _predecessorSymbolicUb( predecessorSymbolicUb ) + , 
_predecessorSymbolicLowerBias( predecessorSymbolicLowerBias ) + , _predecessorSymbolicUpperBias( predecessorSymbolicUpperBias ) { const Map &layers = _layerOwner->getLayerIndexToLayer(); // Get the maximal layer size @@ -148,6 +172,8 @@ void DeepPolyAnalysis::run() { if ( layer->neuronEliminated( j ) ) continue; + if ( _storeOutputSymbolicBounds && index == _layerOwner->getNumberOfLayers() - 1 ) + continue; double lb = deepPolyElement->getLowerBound( j ); if ( layer->getLb( j ) < lb ) { @@ -157,6 +183,7 @@ void DeepPolyAnalysis::run() layer->getLb( j ), lb ) ); layer->setLb( j, lb ); + _layerOwner->receiveTighterBound( Tightening( layer->neuronToVariable( j ), lb, Tightening::LB ) ); } @@ -169,6 +196,7 @@ void DeepPolyAnalysis::run() layer->getUb( j ), ub ) ); layer->setUb( j, ub ); + _layerOwner->receiveTighterBound( Tightening( layer->neuronToVariable( j ), ub, Tightening::UB ) ); } @@ -235,6 +263,26 @@ DeepPolyElement *DeepPolyAnalysis::createDeepPolyElement( Layer *layer ) else throw NLRError( NLRError::LAYER_TYPE_NOT_SUPPORTED, Stringf( "Layer %u not yet supported", layer->getLayerType() ).ascii() ); + + Map _layerIndexToLayer = _layerOwner->getLayerIndexToLayer(); + Layer *outputLayer = _layerIndexToLayer[_layerOwner->getNumberOfLayers() - 1]; + unsigned outputLayerSize = outputLayer->getSize(); + deepPolyElement->setOutputLayerSize( outputLayerSize ); + deepPolyElement->setStorePredecessorSymbolicBounds( _storePredecessorSymbolicBounds ); + if ( layer->getLayerIndex() == _layerOwner->getNumberOfLayers() - 1 ) + { + deepPolyElement->setStoreOutputSymbolicBounds( _storeOutputSymbolicBounds ); + } + deepPolyElement->setUseParameterisedSBT( _useParameterisedSBT ); + deepPolyElement->setLayerIndicesToParameters( _layerIndicesToParameters ); + deepPolyElement->setSymbolicBoundsMemory( _outputSymbolicLb, + _outputSymbolicUb, + _outputSymbolicLowerBias, + _outputSymbolicUpperBias, + _predecessorSymbolicLb, + _predecessorSymbolicUb, + _predecessorSymbolicLowerBias, + _predecessorSymbolicUpperBias ); return deepPolyElement; } diff --git a/src/nlr/DeepPolyAnalysis.h b/src/nlr/DeepPolyAnalysis.h index ddadd1c84d..158506dd25 100644 --- a/src/nlr/DeepPolyAnalysis.h +++ b/src/nlr/DeepPolyAnalysis.h @@ -28,13 +28,29 @@ namespace NLR { class DeepPolyAnalysis { public: - DeepPolyAnalysis( LayerOwner *layerOwner ); + DeepPolyAnalysis( LayerOwner *layerOwner, + bool storeOutputSymbolicBounds = false, + bool storePredecessorSymbolicBounds = false, + bool useParameterisedSBT = false, + Map> *layerIndicesToParameters = NULL, + Map> *outputSymbolicLb = NULL, + Map> *outputSymbolicUb = NULL, + Map> *outputSymbolicLowerBias = NULL, + Map> *outputSymbolicUpperBias = NULL, + Map> *predecessorSymbolicLb = NULL, + Map> *predecessorSymbolicUb = NULL, + Map> *predecessorSymbolicLowerBias = NULL, + Map> *predecessorSymbolicUpperBias = NULL ); ~DeepPolyAnalysis(); void run(); private: LayerOwner *_layerOwner; + bool _storeOutputSymbolicBounds; + bool _storePredecessorSymbolicBounds; + bool _useParameterisedSBT; + Map> *_layerIndicesToParameters; /* Maps layer index to the abstract element @@ -51,6 +67,16 @@ class DeepPolyAnalysis double *_workSymbolicLowerBias; double *_workSymbolicUpperBias; + Map> *_outputSymbolicLb; + Map> *_outputSymbolicUb; + Map> *_outputSymbolicLowerBias; + Map> *_outputSymbolicUpperBias; + + Map> *_predecessorSymbolicLb; + Map> *_predecessorSymbolicUb; + Map> *_predecessorSymbolicLowerBias; + Map> *_predecessorSymbolicUpperBias; + unsigned _maxLayerSize; void allocateMemory(); diff --git 
a/src/nlr/DeepPolyBilinearElement.cpp b/src/nlr/DeepPolyBilinearElement.cpp index e8d2aef311..b1c8b5f80f 100644 --- a/src/nlr/DeepPolyBilinearElement.cpp +++ b/src/nlr/DeepPolyBilinearElement.cpp @@ -89,19 +89,54 @@ void DeepPolyBilinearElement::execute( _lb[i] = std::max( lb, _lb[i] ); _ub[i] = std::min( ub, _ub[i] ); - // Symbolic lower bound: - // out >= alpha * x + beta * y + gamma - // where alpha = lb_y, beta = lb_x, gamma = -lb_x * lb_y - _symbolicLbA[i] = sourceLbs[1]; - _symbolicLbB[i] = sourceLbs[0]; - _symbolicLowerBias[i] = -sourceLbs[0] * sourceLbs[1]; - - // Symbolic upper bound: - // out <= alpha * x + beta * y + gamma - // where alpha = ub_y, beta = lb_x, gamma = -lb_x * ub_y - _symbolicUbA[i] = sourceUbs[1]; - _symbolicUbB[i] = sourceLbs[0]; - _symbolicUpperBias[i] = -sourceLbs[0] * sourceUbs[1]; + if ( !_useParameterisedSBT ) + { + // Symbolic lower bound: + // out >= alpha * x + beta * y + gamma + // where alpha = lb_y, beta = lb_x, gamma = -lb_x * lb_y + _symbolicLbA[i] = sourceLbs[1]; + _symbolicLbB[i] = sourceLbs[0]; + _symbolicLowerBias[i] = -sourceLbs[0] * sourceLbs[1]; + + // Symbolic upper bound: + // out <= alpha * x + beta * y + gamma + // where alpha = ub_y, beta = lb_x, gamma = -lb_x * ub_y + _symbolicUbA[i] = sourceUbs[1]; + _symbolicUbB[i] = sourceLbs[0]; + _symbolicUpperBias[i] = -sourceLbs[0] * sourceUbs[1]; + } + else + { + Vector coeffs = ( *_layerIndicesToParameters )[_layerIndex]; + ASSERT( coeffs.size() == 2 ); + ASSERT( coeffs[0] >= 0 && coeffs[0] <= 1 ); + ASSERT( coeffs[1] >= 0 && coeffs[1] <= 1 ); + // Bilinear linear relaxation (arXiv:2405.21063v2 [cs.LG]) + // Lower bound: out >= aLower * x + bLower * y + c_l, where + // aLower = alpha1 * l_y + ( 1 - alpha1 ) * u_y + // bLower = alpha1 * l_x + ( 1 - alpha1 ) * u_x + // c_l = -alpha1 * l_x * l_y - ( 1 - alpha1 ) * u_x * u_y + + // Upper bound: out <= aUpper * x + bUpper * y + c_u, where + // aUpper = alpha2 * u_y + ( 1 - alpha2 ) * l_y + // bUpper = alpha2 * l_x + ( 1 - alpha2 ) * u_x + // c_u = -alpha2 * l_x * u_y - ( 1 - alpha2 ) * u_x * l_y + + _symbolicLbA[i] = coeffs[0] * sourceLbs[1] + ( 1 - coeffs[0] ) * sourceUbs[1]; + _symbolicUbA[i] = coeffs[1] * sourceUbs[1] + ( 1 - coeffs[1] ) * sourceLbs[1]; + _symbolicLowerBias[i] = -coeffs[0] * sourceLbs[0] * sourceLbs[1] - + ( 1 - coeffs[0] ) * sourceUbs[0] * sourceUbs[1]; + + _symbolicLbB[i] = coeffs[0] * sourceLbs[0] + ( 1 - coeffs[0] ) * sourceUbs[0]; + _symbolicUbB[i] = coeffs[1] * sourceLbs[0] + ( 1 - coeffs[1] ) * sourceUbs[0]; + _symbolicUpperBias[i] = -coeffs[1] * sourceLbs[0] * sourceUbs[1] - + ( 1 - coeffs[1] ) * sourceUbs[0] * sourceLbs[1]; + } + } + + if ( _storePredecessorSymbolicBounds ) + { + storePredecessorSymbolicBounds(); } DEBUG( { @@ -115,6 +150,19 @@ void DeepPolyBilinearElement::execute( log( "Executing - done" ); } +void DeepPolyBilinearElement::storePredecessorSymbolicBounds() +{ + for ( unsigned i = 0; i < _size; ++i ) + { + ( *_predecessorSymbolicLb )[_layerIndex][i] = _symbolicLbA[i]; + ( *_predecessorSymbolicUb )[_layerIndex][i] = _symbolicUbA[i]; + ( *_predecessorSymbolicLb )[_layerIndex][_size + i] = _symbolicLbB[i]; + ( *_predecessorSymbolicUb )[_layerIndex][_size + i] = _symbolicUbB[i]; + ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = _symbolicLowerBias[i]; + ( *_predecessorSymbolicUpperBias )[_layerIndex][i] = _symbolicUpperBias[i]; + } +} + void DeepPolyBilinearElement::symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, diff --git a/src/nlr/DeepPolyBilinearElement.h 
b/src/nlr/DeepPolyBilinearElement.h index 8b0ce8dcce..6e8d82e8c6 100644 --- a/src/nlr/DeepPolyBilinearElement.h +++ b/src/nlr/DeepPolyBilinearElement.h @@ -33,6 +33,8 @@ class DeepPolyBilinearElement : public DeepPolyElement void execute( const Map &deepPolyElementsBefore ); + void storePredecessorSymbolicBounds(); + void symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, double *symbolicLowerBias, diff --git a/src/nlr/DeepPolyElement.cpp b/src/nlr/DeepPolyElement.cpp index d95e62ceb6..0652047f50 100644 --- a/src/nlr/DeepPolyElement.cpp +++ b/src/nlr/DeepPolyElement.cpp @@ -1,159 +1,282 @@ -/********************* */ -/*! \file DeepPolyElement.cpp - ** \verbatim - ** Top contributors (to current version): - ** Haoze Andrew Wu - ** This file is part of the Marabou project. - ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS - ** in the top-level source directory) and their institutional affiliations. - ** All rights reserved. See the file COPYING in the top-level source - ** directory for licensing information.\endverbatim - ** - ** [[ Add lengthier description here ]] - -**/ - -#include "DeepPolyElement.h" - -namespace NLR { - -DeepPolyElement::DeepPolyElement() - : _layer( NULL ) - , _size( 0 ) - , _layerIndex( 0 ) - , _symbolicLb( NULL ) - , _symbolicUb( NULL ) - , _symbolicLowerBias( NULL ) - , _symbolicUpperBias( NULL ) - , _lb( NULL ) - , _ub( NULL ) - , _work1SymbolicLb( NULL ) - , _work1SymbolicUb( NULL ) - , _work2SymbolicLb( NULL ) - , _work2SymbolicUb( NULL ) - , _workSymbolicLowerBias( NULL ) - , _workSymbolicUpperBias( NULL ){}; - -unsigned DeepPolyElement::getSize() const -{ - return _size; -} - -unsigned DeepPolyElement::getLayerIndex() const -{ - return _layerIndex; -} - -Layer::Type DeepPolyElement::getLayerType() const -{ - return _layer->getLayerType(); -} - -bool DeepPolyElement::hasPredecessor() -{ - return !_layer->getSourceLayers().empty(); -} - -const Map &DeepPolyElement::getPredecessorIndices() const -{ - const Map &sourceLayers = _layer->getSourceLayers(); - return sourceLayers; -} - -double *DeepPolyElement::getSymbolicLb() const -{ - return _symbolicLb; -} - -double *DeepPolyElement::getSymbolicUb() const -{ - return _symbolicUb; -} - -double *DeepPolyElement::getSymbolicLowerBias() const -{ - return _symbolicLowerBias; -} - -double *DeepPolyElement::getSymbolicUpperBias() const -{ - return _symbolicUpperBias; -} - -double DeepPolyElement::getLowerBound( unsigned index ) const -{ - ASSERT( index < getSize() ); - return _lb[index]; -} - -double DeepPolyElement::getUpperBound( unsigned index ) const -{ - ASSERT( index < getSize() ); - return _ub[index]; -} - -double DeepPolyElement::getLowerBoundFromLayer( unsigned index ) const -{ - ASSERT( index < getSize() ); - return _layer->getLb( index ); -} - -double DeepPolyElement::getUpperBoundFromLayer( unsigned index ) const -{ - ASSERT( index < getSize() ); - return _layer->getUb( index ); -} - -void DeepPolyElement::getConcreteBounds() -{ - unsigned size = getSize(); - for ( unsigned i = 0; i < size; ++i ) - { - _lb[i] = _layer->getLb( i ); - _ub[i] = _layer->getUb( i ); - } -} - -void DeepPolyElement::allocateMemory() -{ - freeMemoryIfNeeded(); - - unsigned size = getSize(); - _lb = new double[size]; - _ub = new double[size]; - - std::fill_n( _lb, size, FloatUtils::negativeInfinity() ); - std::fill_n( _ub, size, FloatUtils::infinity() ); -} - -void DeepPolyElement::freeMemoryIfNeeded() -{ - if ( _lb ) - { - delete[] _lb; - _lb = NULL; - } - - if ( _ub ) - { - 
delete[] _ub; - _ub = NULL; - } -} - -void DeepPolyElement::setWorkingMemory( double *work1SymbolicLb, - double *work1SymbolicUb, - double *work2SymbolicLb, - double *work2SymbolicUb, - double *workSymbolicLowerBias, - double *workSymbolicUpperBias ) -{ - _work1SymbolicLb = work1SymbolicLb; - _work1SymbolicUb = work1SymbolicUb; - _work2SymbolicLb = work2SymbolicLb; - _work2SymbolicUb = work2SymbolicUb; - _workSymbolicLowerBias = workSymbolicLowerBias; - _workSymbolicUpperBias = workSymbolicUpperBias; -} - -} // namespace NLR +/********************* */ +/*! \file DeepPolyElement.cpp + ** \verbatim + ** Top contributors (to current version): + ** Haoze Andrew Wu + ** This file is part of the Marabou project. + ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS + ** in the top-level source directory) and their institutional affiliations. + ** All rights reserved. See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** [[ Add lengthier description here ]] + +**/ + +#include "DeepPolyElement.h" + +namespace NLR { + +DeepPolyElement::DeepPolyElement() + : _layer( NULL ) + , _size( 0 ) + , _layerIndex( 0 ) + , _storeOutputSymbolicBounds( false ) + , _storePredecessorSymbolicBounds( false ) + , _useParameterisedSBT( false ) + , _layerIndicesToParameters( NULL ) + , _outputLayerSize( 0 ) + , _symbolicLb( NULL ) + , _symbolicUb( NULL ) + , _symbolicLowerBias( NULL ) + , _symbolicUpperBias( NULL ) + , _lb( NULL ) + , _ub( NULL ) + , _work1SymbolicLb( NULL ) + , _work1SymbolicUb( NULL ) + , _work2SymbolicLb( NULL ) + , _work2SymbolicUb( NULL ) + , _workSymbolicLowerBias( NULL ) + , _workSymbolicUpperBias( NULL ){}; + +unsigned DeepPolyElement::getSize() const +{ + return _size; +} +unsigned DeepPolyElement::getLayerIndex() const +{ + return _layerIndex; +} + +Layer::Type DeepPolyElement::getLayerType() const +{ + return _layer->getLayerType(); +} + +bool DeepPolyElement::hasPredecessor() +{ + return !_layer->getSourceLayers().empty(); +} + +const Map &DeepPolyElement::getPredecessorIndices() const +{ + const Map &sourceLayers = _layer->getSourceLayers(); + return sourceLayers; +} + +double *DeepPolyElement::getSymbolicLb() const +{ + return _symbolicLb; +} + +double *DeepPolyElement::getSymbolicUb() const +{ + return _symbolicUb; +} + +double *DeepPolyElement::getSymbolicLowerBias() const +{ + return _symbolicLowerBias; +} + +double *DeepPolyElement::getSymbolicUpperBias() const +{ + return _symbolicUpperBias; +} + +double DeepPolyElement::getLowerBound( unsigned index ) const +{ + ASSERT( index < getSize() ); + return _lb[index]; +} + +double DeepPolyElement::getUpperBound( unsigned index ) const +{ + ASSERT( index < getSize() ); + return _ub[index]; +} + +void DeepPolyElement::setStoreOutputSymbolicBounds( bool storeOutputSymbolicBounds ) +{ + _storeOutputSymbolicBounds = storeOutputSymbolicBounds; +} + +void DeepPolyElement::setStorePredecessorSymbolicBounds( bool storePredecessorSymbolicBounds ) +{ + _storePredecessorSymbolicBounds = storePredecessorSymbolicBounds; +} + +void DeepPolyElement::setUseParameterisedSBT( bool useParameterisedSBT ) +{ + _useParameterisedSBT = useParameterisedSBT; +} + +void DeepPolyElement::setLayerIndicesToParameters( + Map> *layerIndicesToParameters ) +{ + _layerIndicesToParameters = layerIndicesToParameters; +} + +void DeepPolyElement::setOutputLayerSize( unsigned outputLayerSize ) +{ + _outputLayerSize = outputLayerSize; +} + +double DeepPolyElement::getLowerBoundFromLayer( unsigned index ) const 
+{ + ASSERT( index < getSize() ); + return _layer->getLb( index ); +} + +double DeepPolyElement::getUpperBoundFromLayer( unsigned index ) const +{ + ASSERT( index < getSize() ); + return _layer->getUb( index ); +} + +void DeepPolyElement::getConcreteBounds() +{ + unsigned size = getSize(); + for ( unsigned i = 0; i < size; ++i ) + { + _lb[i] = _layer->getLb( i ); + _ub[i] = _layer->getUb( i ); + } +} + +void DeepPolyElement::allocateMemory() +{ + freeMemoryIfNeeded(); + + unsigned size = getSize(); + _lb = new double[size]; + _ub = new double[size]; + + std::fill_n( _lb, size, FloatUtils::negativeInfinity() ); + std::fill_n( _ub, size, FloatUtils::infinity() ); +} + +void DeepPolyElement::freeMemoryIfNeeded() +{ + if ( _lb ) + { + delete[] _lb; + _lb = NULL; + } + + if ( _ub ) + { + delete[] _ub; + _ub = NULL; + } +} + +void DeepPolyElement::setWorkingMemory( double *work1SymbolicLb, + double *work1SymbolicUb, + double *work2SymbolicLb, + double *work2SymbolicUb, + double *workSymbolicLowerBias, + double *workSymbolicUpperBias ) +{ + _work1SymbolicLb = work1SymbolicLb; + _work1SymbolicUb = work1SymbolicUb; + _work2SymbolicLb = work2SymbolicLb; + _work2SymbolicUb = work2SymbolicUb; + _workSymbolicLowerBias = workSymbolicLowerBias; + _workSymbolicUpperBias = workSymbolicUpperBias; +} + +void DeepPolyElement::setSymbolicBoundsMemory( + Map> *outputSymbolicLb, + Map> *outputSymbolicUb, + Map> *outputSymbolicLowerBias, + Map> *outputSymbolicUpperBias, + Map> *predecessorSymbolicLb, + Map> *predecessorSymbolicUb, + Map> *predecessorSymbolicLowerBias, + Map> *predecessorSymbolicUpperBias ) +{ + _outputSymbolicLb = outputSymbolicLb; + _outputSymbolicUb = outputSymbolicUb; + _outputSymbolicLowerBias = outputSymbolicLowerBias; + _outputSymbolicUpperBias = outputSymbolicUpperBias; + _predecessorSymbolicLb = predecessorSymbolicLb; + _predecessorSymbolicUb = predecessorSymbolicUb; + _predecessorSymbolicLowerBias = predecessorSymbolicLowerBias; + _predecessorSymbolicUpperBias = predecessorSymbolicUpperBias; +} + +void DeepPolyElement::storeOutputSymbolicBounds( + double *work1SymbolicLb, + double *work1SymbolicUb, + double *workSymbolicLowerBias, + double *workSymbolicUpperBias, + Map &residualLb, + Map &residualUb, + Set &residualLayerIndices, + const Map &deepPolyElementsBefore ) +{ + // Remove externally fixed neurons from symbolic bounds, replace them with their value. + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _layer->neuronEliminated( i ) ) + { + double value = _layer->getEliminatedNeuronValue( i ); + for ( unsigned j = 0; j < _outputLayerSize; ++j ) + { + workSymbolicLowerBias[i] += work1SymbolicLb[i * _size + j] * value; + workSymbolicUpperBias[i] += work1SymbolicUb[i * _size + j] * value; + work1SymbolicLb[i * _size + j] = 0; + work1SymbolicUb[i * _size + j] = 0; + } + } + } + + // Remove residual layers from symbolic bounds, concretize them instead. 
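// Illustrative sketch, not part of the patch: concretizing a single residual term.
// A term w * x with x known to lie in [ lx, ux ] is replaced by its worst-case constant
// contribution: w * lx for a lower bound when w >= 0 and w * ux otherwise (mirrored for
// an upper bound). The loop below applies this rule entry-wise using
// currentResidualLb / currentResidualUb:
//
//     double w = -2.0, lx = 1.0, ux = 3.0;
//     double lowerContribution = ( w >= 0 ) ? w * lx : w * ux; // -6.0
//     double upperContribution = ( w >= 0 ) ? w * ux : w * lx; // -2.0
//     // lowerContribution <= w * x <= upperContribution for every x in [ lx, ux ]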
+ Vector symbolicLowerBiasConcretizedResiduals( _outputLayerSize, 0 ); + Vector symbolicUpperBiasConcretizedResiduals( _outputLayerSize, 0 ); + for ( unsigned i = 0; i < _outputLayerSize; ++i ) + { + symbolicLowerBiasConcretizedResiduals[i] = workSymbolicLowerBias[i]; + symbolicUpperBiasConcretizedResiduals[i] = workSymbolicUpperBias[i]; + } + for ( const auto &residualLayerIndex : residualLayerIndices ) + { + DeepPolyElement *residualElement = deepPolyElementsBefore[residualLayerIndex]; + double *currentResidualLb = residualLb[residualLayerIndex]; + double *currentResidualUb = residualUb[residualLayerIndex]; + + // Get concrete bounds for residual neurons. + for ( unsigned i = 0; i < residualElement->getSize(); ++i ) + { + double sourceLb = residualElement->getLowerBoundFromLayer( i ) - + GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT; + double sourceUb = residualElement->getUpperBoundFromLayer( i ) + + GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT; + + for ( unsigned j = 0; j < _outputLayerSize; ++j ) + { + double lowerWeight = currentResidualLb[i * _outputLayerSize + j]; + double upperWeight = currentResidualUb[i * _outputLayerSize + j]; + symbolicLowerBiasConcretizedResiduals[j] += + lowerWeight >= 0 ? lowerWeight * sourceLb : lowerWeight * sourceUb; + symbolicUpperBiasConcretizedResiduals[j] += + upperWeight >= 0 ? upperWeight * sourceUb : upperWeight * sourceLb; + } + } + } + + // Store updated bounds. + for ( unsigned i = 0; i < _size * _outputLayerSize; ++i ) + { + ( *_outputSymbolicLb )[_layerIndex][i] = work1SymbolicLb[i]; + ( *_outputSymbolicUb )[_layerIndex][i] = work1SymbolicUb[i]; + } + for ( unsigned i = 0; i < _outputLayerSize; ++i ) + { + ( *_outputSymbolicLowerBias )[_layerIndex][i] = symbolicLowerBiasConcretizedResiduals[i]; + ( *_outputSymbolicUpperBias )[_layerIndex][i] = symbolicUpperBiasConcretizedResiduals[i]; + } +} + +} // namespace NLR diff --git a/src/nlr/DeepPolyElement.h b/src/nlr/DeepPolyElement.h index ee86d62eca..1c0b05953e 100644 --- a/src/nlr/DeepPolyElement.h +++ b/src/nlr/DeepPolyElement.h @@ -69,6 +69,12 @@ class DeepPolyElement double getLowerBound( unsigned index ) const; double getUpperBound( unsigned index ) const; + void setStoreOutputSymbolicBounds( bool storeOutputSymbolicBounds ); + void setStorePredecessorSymbolicBounds( bool storePredecessorSymbolicBounds ); + void setUseParameterisedSBT( bool useParameterisedSBT ); + void setLayerIndicesToParameters( Map> *layerIndicesToParameters ); + void setOutputLayerSize( unsigned outputLayerSize ); + void setWorkingMemory( double *work1SymbolicLb, double *work1SymbolicUb, double *work2SymbolicLb, @@ -76,6 +82,25 @@ class DeepPolyElement double *workSymbolicLowerBias, double *workSymbolicUpperBias ); + void setSymbolicBoundsMemory( Map> *outputSymbolicLb, + Map> *outputSymbolicUb, + Map> *outputSymbolicLowerBias, + Map> *outputSymbolicUpperBias, + Map> *predecessorSymbolicLb, + Map> *predecessorSymbolicUb, + Map> *predecessorSymbolicLowerBias, + Map> *predecessorSymbolicUpperBias ); + + void + storeOutputSymbolicBounds( double *work1SymbolicLb, + double *work1SymbolicUb, + double *workSymbolicLowerBias, + double *workSymbolicUpperBias, + Map &residualLb, + Map &residualUb, + Set &residualLayerIndices, + const Map &deepPolyElementsBefore ); + double getLowerBoundFromLayer( unsigned index ) const; double getUpperBoundFromLayer( unsigned index ) const; @@ -83,6 +108,11 @@ class DeepPolyElement Layer *_layer; unsigned _size; unsigned _layerIndex; + bool _storeOutputSymbolicBounds; + 
bool _storePredecessorSymbolicBounds; + bool _useParameterisedSBT; + Map> *_layerIndicesToParameters; + unsigned _outputLayerSize; /* Abstract element described in @@ -103,6 +133,16 @@ class DeepPolyElement double *_workSymbolicLowerBias; double *_workSymbolicUpperBias; + Map> *_outputSymbolicLb; + Map> *_outputSymbolicUb; + Map> *_outputSymbolicLowerBias; + Map> *_outputSymbolicUpperBias; + + Map> *_predecessorSymbolicLb; + Map> *_predecessorSymbolicUb; + Map> *_predecessorSymbolicLowerBias; + Map> *_predecessorSymbolicUpperBias; + void allocateMemory(); void freeMemoryIfNeeded(); diff --git a/src/nlr/DeepPolyLeakyReLUElement.cpp b/src/nlr/DeepPolyLeakyReLUElement.cpp index f944d3ac73..ee63944f43 100644 --- a/src/nlr/DeepPolyLeakyReLUElement.cpp +++ b/src/nlr/DeepPolyLeakyReLUElement.cpp @@ -50,87 +50,165 @@ void DeepPolyLeakyReLUElement::execute( double sourceLb = predecessor->getLowerBound( sourceIndex._neuron ); double sourceUb = predecessor->getUpperBound( sourceIndex._neuron ); - if ( !FloatUtils::isNegative( sourceLb ) ) + if ( !_useParameterisedSBT ) { - // Phase active - // Symbolic bound: x_b <= x_f <= x_b - // Concrete bound: lb_b <= x_f <= ub_b - _symbolicUb[i] = 1; - _symbolicUpperBias[i] = 0; - _ub[i] = sourceUb; - - _symbolicLb[i] = 1; - _symbolicLowerBias[i] = 0; - _lb[i] = sourceLb; - } - else if ( !FloatUtils::isPositive( sourceUb ) ) - { - // Phase inactive - // Symbolic bound: slope * x_b <= x_f <= slope * x_b - // Concrete bound: slope * lb_b <= x_f <= slope * ub_b - _symbolicUb[i] = _slope; - _symbolicUpperBias[i] = 0; - _ub[i] = _slope * sourceUb; - - _symbolicLb[i] = _slope; - _symbolicLowerBias[i] = 0; - _lb[i] = _slope * sourceLb; - } - else - { - // LeakyReLU not fixed - // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) - // Concrete upper bound: x_f <= ub_b - double width = sourceUb - sourceLb; - double coeff = ( sourceUb - _slope * sourceLb ) / width; - - if ( _slope <= 1 ) + if ( !FloatUtils::isNegative( sourceLb ) ) { - _symbolicUb[i] = coeff; - _symbolicUpperBias[i] = ( ( _slope - 1 ) * sourceUb * sourceLb ) / width; + // Phase active + // Symbolic bound: x_b <= x_f <= x_b + // Concrete bound: lb_b <= x_f <= ub_b + _symbolicUb[i] = 1; + _symbolicUpperBias[i] = 0; _ub[i] = sourceUb; - // For the lower bound, in general, x_f >= lambda * x_b, where - // 0 <= lambda <= 1, would be a sound lower bound. We - // use the heuristic described in section 4.1 of - // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf - // to set the value of lambda (either 0 or 1 is considered). 
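// Illustrative sketch, not part of the patch: a quick numeric check of the chord used
// as the upper bound when the LeakyReLU is unfixed and _slope <= 1. The chord passes
// through ( l, slope * l ) and ( u, u ), so it dominates the activation on [ l, u ].
// For slope = 0.01, l = -2, u = 4:
//
//     double slope = 0.01, l = -2, u = 4;
//     double width = u - l;                             // 6
//     double weight = ( u - slope * l ) / width;        // 0.67
//     double bias = ( ( slope - 1 ) * u * l ) / width;  // 1.32
//     // weight * l + bias == -0.02 == slope * l  (tight at x = l)
//     // weight * u + bias ==  4.00 == u          (tight at x = u)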
- if ( sourceUb > sourceLb ) + _symbolicLb[i] = 1; + _symbolicLowerBias[i] = 0; + _lb[i] = sourceLb; + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + // Phase inactive + // Symbolic bound: slope * x_b <= x_f <= slope * x_b + // Concrete bound: slope * lb_b <= x_f <= slope * ub_b + _symbolicUb[i] = _slope; + _symbolicUpperBias[i] = 0; + _ub[i] = _slope * sourceUb; + + _symbolicLb[i] = _slope; + _symbolicLowerBias[i] = 0; + _lb[i] = _slope * sourceLb; + } + else + { + // LeakyReLU not fixed + // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) + // Concrete upper bound: x_f <= ub_b + double width = sourceUb - sourceLb; + double weight = ( sourceUb - _slope * sourceLb ) / width; + + if ( _slope <= 1 ) { - // lambda = 1 - // Symbolic lower bound: x_f >= x_b - // Concrete lower bound: x_f >= sourceLb - _symbolicLb[i] = 1; - _symbolicLowerBias[i] = 0; - _lb[i] = sourceLb; + _symbolicUb[i] = weight; + _symbolicUpperBias[i] = ( ( _slope - 1 ) * sourceUb * sourceLb ) / width; + _ub[i] = sourceUb; + + // For the lower bound, in general, x_f >= lambda * x_b, where + // 0 <= lambda <= 1, would be a sound lower bound. We + // use the heuristic described in section 4.1 of + // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + // to set the value of lambda (either 0 or 1 is considered). + if ( sourceUb > sourceLb ) + { + // lambda = 1 + // Symbolic lower bound: x_f >= x_b + // Concrete lower bound: x_f >= sourceLb + _symbolicLb[i] = 1; + _symbolicLowerBias[i] = 0; + _lb[i] = sourceLb; + } + else + { + // lambda = 1 + // Symbolic lower bound: x_f >= 0 + // Concrete lower bound: x_f >= 0 + _symbolicLb[i] = _slope; + _symbolicLowerBias[i] = 0; + _lb[i] = _slope * sourceLb; + } } else { - // lambda = 1 - // Symbolic lower bound: x_f >= 0 - // Concrete lower bound: x_f >= 0 - _symbolicLb[i] = _slope; - _symbolicLowerBias[i] = 0; + _symbolicLb[i] = weight; + _symbolicLowerBias[i] = ( ( _slope - 1 ) * sourceUb * sourceLb ) / width; _lb[i] = _slope * sourceLb; + + if ( sourceUb > sourceLb ) + { + _symbolicUb[i] = 1; + _symbolicUpperBias[i] = 0; + _ub[i] = sourceUb; + } + else + { + _symbolicUb[i] = _slope; + _symbolicLowerBias[i] = 0; + _ub[i] = _slope * sourceUb; + } } } - else + } + else + { + Vector coeffs = ( *_layerIndicesToParameters )[_layerIndex]; + ASSERT( coeffs.size() == 1 ); + double coeff = coeffs[0]; + ASSERT( coeff >= 0 && coeff <= 1 ); + + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // Phase active + // Symbolic bound: x_b <= x_f <= x_b + // Concrete bound: lb_b <= x_f <= ub_b + _symbolicUb[i] = 1; + _symbolicUpperBias[i] = 0; + _ub[i] = sourceUb; + + _symbolicLb[i] = 1; + _symbolicLowerBias[i] = 0; + _lb[i] = sourceLb; + } + else if ( !FloatUtils::isPositive( sourceUb ) ) { - _symbolicLb[i] = coeff; - _symbolicLowerBias[i] = ( ( _slope - 1 ) * sourceUb * sourceLb ) / width; + // Phase inactive + // Symbolic bound: slope * x_b <= x_f <= slope * x_b + // Concrete bound: slope * lb_b <= x_f <= slope * ub_b + _symbolicUb[i] = _slope; + _symbolicUpperBias[i] = 0; + _ub[i] = _slope * sourceUb; + + _symbolicLb[i] = _slope; + _symbolicLowerBias[i] = 0; _lb[i] = _slope * sourceLb; + } + else + { + // LeakyReLU not fixed + // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) + // Concrete upper bound: x_f <= ub_b + double width = sourceUb - sourceLb; + double weight = ( sourceUb - _slope * sourceLb ) / width; - if ( sourceUb > sourceLb ) + if ( _slope <= 1 ) { - _symbolicUb[i] = 1; - _symbolicUpperBias[i] = 0; + _symbolicUb[i] = weight; + _symbolicUpperBias[i] = ( ( 
_slope - 1 ) * sourceUb * sourceLb ) / width; _ub[i] = sourceUb; + + // For the lower bound, in general, x_f >= lambda * x_b, where + // 0 <= lambda <= 1, would be a sound lower bound. We + // use the heuristic described in section 4.1 of + // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + // to set the value of lambda (either 0 or 1 is considered). + + // lambda = ( ( 1 - _slope ) * coeff + _slope ) + // Symbolic lower bound: x_f >= ( ( 1 - _slope ) * coeff + _slope ) * x_b + // Concrete lower bound: x_f >= ( ( 1 - _slope ) * coeff + _slope ) * sourceLb + _symbolicLb[i] = ( ( 1 - _slope ) * coeff + _slope ); + _symbolicLowerBias[i] = 0; + _lb[i] = ( ( 1 - _slope ) * coeff + _slope ) * sourceLb; } else { - _symbolicUb[i] = _slope; - _symbolicLowerBias[i] = 0; - _ub[i] = _slope * sourceUb; + _symbolicLb[i] = weight; + _symbolicLowerBias[i] = ( ( _slope - 1 ) * sourceUb * sourceLb ) / width; + _lb[i] = _slope * sourceLb; + + // lambda = ( ( 1 - _slope ) * coeff + _slope ) + // Symbolic upper bound: x_f >= ( ( 1 - _slope ) * coeff + _slope ) * x_b + // Concrete upper bound: x_f >= ( ( 1 - _slope ) * coeff + _slope ) * sourceUb + _symbolicUb[i] = ( ( 1 - _slope ) * coeff + _slope ); + _symbolicUpperBias[i] = 0; + _ub[i] = ( ( 1 - _slope ) * coeff + _slope ) * sourceUb; } } } @@ -142,9 +220,26 @@ void DeepPolyLeakyReLUElement::execute( _symbolicUpperBias[i] ) ); log( Stringf( "Neuron%u LB: %f, UB: %f", i, _lb[i], _ub[i] ) ); } + + if ( _storePredecessorSymbolicBounds ) + { + storePredecessorSymbolicBounds(); + } + log( "Executing - done" ); } +void DeepPolyLeakyReLUElement::storePredecessorSymbolicBounds() +{ + for ( unsigned i = 0; i < _size; ++i ) + { + ( *_predecessorSymbolicLb )[_layerIndex][i] = _symbolicLb[i]; + ( *_predecessorSymbolicUb )[_layerIndex][i] = _symbolicUb[i]; + ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = _symbolicLowerBias[i]; + ( *_predecessorSymbolicUpperBias )[_layerIndex][i] = _symbolicUpperBias[i]; + } +} + void DeepPolyLeakyReLUElement::symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, diff --git a/src/nlr/DeepPolyLeakyReLUElement.h b/src/nlr/DeepPolyLeakyReLUElement.h index 01ae5876b3..4349836eac 100644 --- a/src/nlr/DeepPolyLeakyReLUElement.h +++ b/src/nlr/DeepPolyLeakyReLUElement.h @@ -33,6 +33,8 @@ class DeepPolyLeakyReLUElement : public DeepPolyElement void execute( const Map &deepPolyElementsBefore ); + void storePredecessorSymbolicBounds(); + void symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, double *symbolicLowerBias, diff --git a/src/nlr/DeepPolyMaxPoolElement.cpp b/src/nlr/DeepPolyMaxPoolElement.cpp index 43f7084978..398f5493ee 100644 --- a/src/nlr/DeepPolyMaxPoolElement.cpp +++ b/src/nlr/DeepPolyMaxPoolElement.cpp @@ -40,6 +40,8 @@ void DeepPolyMaxPoolElement::execute( // Update the symbolic and concrete upper- and lower- bounds // of each neuron + Vector maxLowerBoundIndices( _size ); + Vector maxUpperBounds( _size ); for ( unsigned i = 0; i < _size; ++i ) { log( Stringf( "Handling Neuron %u_%u...", _layerIndex, i ) ); @@ -83,6 +85,10 @@ void DeepPolyMaxPoolElement::execute( } } + _phaseFixed[i] = phaseFixed; + maxLowerBoundIndices[i] = indexOfMaxLowerBound._neuron; + maxUpperBounds[i] = maxUpperBound; + if ( phaseFixed ) { log( Stringf( "Neuron %u_%u fixed to Neuron %u_%u", @@ -110,9 +116,38 @@ void DeepPolyMaxPoolElement::execute( log( Stringf( "Neuron%u LB: %f, UB: %f", i, _lb[i], _ub[i] ) ); log( Stringf( "Handling Neuron %u_%u - done", _layerIndex, i ) 
); } + + if ( _storePredecessorSymbolicBounds ) + { + storePredecessorSymbolicBounds( maxLowerBoundIndices, maxUpperBounds ); + } + log( "Executing - done" ); } +void DeepPolyMaxPoolElement::storePredecessorSymbolicBounds( + const Vector &indexOfMaxLowerBound, + const Vector &maxUpperBound ) +{ + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _phaseFixed[i] ) + { + ( *_predecessorSymbolicLb )[_layerIndex][_size * indexOfMaxLowerBound[i] + i] = 1; + ( *_predecessorSymbolicUb )[_layerIndex][_size * indexOfMaxLowerBound[i] + i] = 1; + ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = 0; + ( *_predecessorSymbolicUpperBias )[_layerIndex][i] = 0; + } + else + { + ( *_predecessorSymbolicLb )[_layerIndex][_size * indexOfMaxLowerBound[i] + i] = 1; + ( *_predecessorSymbolicUb )[_layerIndex][_size * indexOfMaxLowerBound[i] + i] = 0; + ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = 0; + ( *_predecessorSymbolicUpperBias )[_layerIndex][i] = maxUpperBound[i]; + } + } +} + void DeepPolyMaxPoolElement::symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, diff --git a/src/nlr/DeepPolyMaxPoolElement.h b/src/nlr/DeepPolyMaxPoolElement.h index 9b16cd2257..b772104081 100644 --- a/src/nlr/DeepPolyMaxPoolElement.h +++ b/src/nlr/DeepPolyMaxPoolElement.h @@ -34,6 +34,9 @@ class DeepPolyMaxPoolElement : public DeepPolyElement void execute( const Map &deepPolyElementsBefore ); + void storePredecessorSymbolicBounds( const Vector &indexOfMaxLowerBound, + const Vector &maxUpperBound ); + void symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, double *symbolicLowerBias, diff --git a/src/nlr/DeepPolyReLUElement.cpp b/src/nlr/DeepPolyReLUElement.cpp index 678c1edc5c..bbb3b4c161 100644 --- a/src/nlr/DeepPolyReLUElement.cpp +++ b/src/nlr/DeepPolyReLUElement.cpp @@ -46,65 +46,118 @@ void DeepPolyReLUElement::execute( const Map &deepP double sourceLb = predecessor->getLowerBound( sourceIndex._neuron ); double sourceUb = predecessor->getUpperBound( sourceIndex._neuron ); - if ( !FloatUtils::isNegative( sourceLb ) ) + if ( !_useParameterisedSBT ) { - // Phase active - // Symbolic bound: x_b <= x_f <= x_b - // Concrete bound: lb_b <= x_f <= ub_b - _symbolicUb[i] = 1; - _symbolicUpperBias[i] = 0; - _ub[i] = sourceUb; - - _symbolicLb[i] = 1; - _symbolicLowerBias[i] = 0; - _lb[i] = sourceLb; - } - else if ( !FloatUtils::isPositive( sourceUb ) ) - { - // Phase inactive - // Symbolic bound: 0 <= x_f <= 0 - // Concrete bound: 0 <= x_f <= 0 - _symbolicUb[i] = 0; - _symbolicUpperBias[i] = 0; - _ub[i] = 0; - - _symbolicLb[i] = 0; - _symbolicLowerBias[i] = 0; - _lb[i] = 0; + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // Phase active + // Symbolic bound: x_b <= x_f <= x_b + // Concrete bound: lb_b <= x_f <= ub_b + _symbolicUb[i] = 1; + _symbolicUpperBias[i] = 0; + _ub[i] = sourceUb; + + _symbolicLb[i] = 1; + _symbolicLowerBias[i] = 0; + _lb[i] = sourceLb; + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + // Phase inactive + // Symbolic bound: 0 <= x_f <= 0 + // Concrete bound: 0 <= x_f <= 0 + _symbolicUb[i] = 0; + _symbolicUpperBias[i] = 0; + _ub[i] = 0; + + _symbolicLb[i] = 0; + _symbolicLowerBias[i] = 0; + _lb[i] = 0; + } + else + { + // ReLU not fixed + // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) + // Concrete upper bound: x_f <= ub_b + double weight = sourceUb / ( sourceUb - sourceLb ); + _symbolicUb[i] = weight; + _symbolicUpperBias[i] = -sourceLb * weight; + _ub[i] = sourceUb; + + // For the lower bound, in general, x_f >= 
lambda * x_b, where + // 0 <= lambda <= 1, would be a sound lower bound. We + // use the heuristic described in section 4.1 of + // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + // to set the value of lambda (either 0 or 1 is considered). + if ( sourceUb > -sourceLb ) + { + // lambda = 1 + // Symbolic lower bound: x_f >= x_b + // Concrete lower bound: x_f >= sourceLb + _symbolicLb[i] = 1; + _symbolicLowerBias[i] = 0; + _lb[i] = sourceLb; + } + else + { + // lambda = 1 + // Symbolic lower bound: x_f >= 0 + // Concrete lower bound: x_f >= 0 + _symbolicLb[i] = 0; + _symbolicLowerBias[i] = 0; + _lb[i] = 0; + } + } } else { - // ReLU not fixed - // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) - // Concrete upper bound: x_f <= ub_b - double coeff = sourceUb / ( sourceUb - sourceLb ); - _symbolicUb[i] = coeff; - _symbolicUpperBias[i] = -sourceLb * coeff; - _ub[i] = sourceUb; - - // For the lower bound, in general, x_f >= lambda * x_b, where - // 0 <= lambda <= 1, would be a sound lower bound. We - // use the heuristic described in section 4.1 of - // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf - // to set the value of lambda (either 0 or 1 is considered). - if ( sourceUb > -sourceLb ) + Vector coeffs = ( *_layerIndicesToParameters )[_layerIndex]; + ASSERT( coeffs.size() == 1 ); + double coeff = coeffs[0]; + ASSERT( coeff >= 0 && coeff <= 1 ); + if ( !FloatUtils::isNegative( sourceLb ) ) { - // lambda = 1 - // Symbolic lower bound: x_f >= x_b - // Concrete lower bound: x_f >= sourceLb + // Phase active + // Symbolic bound: x_b <= x_f <= x_b + // Concrete bound: lb_b <= x_f <= ub_b + _symbolicUb[i] = 1; + _symbolicUpperBias[i] = 0; + _ub[i] = sourceUb; + _symbolicLb[i] = 1; _symbolicLowerBias[i] = 0; _lb[i] = sourceLb; } - else + else if ( !FloatUtils::isPositive( sourceUb ) ) { - // lambda = 1 - // Symbolic lower bound: x_f >= 0 - // Concrete lower bound: x_f >= 0 + // Phase inactive + // Symbolic bound: 0 <= x_f <= 0 + // Concrete bound: 0 <= x_f <= 0 + _symbolicUb[i] = 0; + _symbolicUpperBias[i] = 0; + _ub[i] = 0; + _symbolicLb[i] = 0; _symbolicLowerBias[i] = 0; _lb[i] = 0; } + else + { + // ReLU not fixed + // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) + // Concrete upper bound: x_f <= ub_b + double weight = sourceUb / ( sourceUb - sourceLb ); + _symbolicUb[i] = weight; + _symbolicUpperBias[i] = -sourceLb * weight; + _ub[i] = sourceUb; + + // For the lower bound, in general, x_f >= lambda * x_b, where + // 0 <= lambda <= 1, would be a sound lower bound. We use coeff + // to set the value of lambda. 
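// Illustrative sketch, not part of the patch: why any coeff in [0, 1] is sound here.
// For an unfixed ReLU ( l < 0 < u ), coeff * x <= 0 = relu( x ) when x < 0 and
// coeff * x <= x = relu( x ) when x >= 0, so y >= coeff * x under-approximates the
// activation everywhere on [ l, u ]. coeff = 0 recovers the constant-zero lower bound
// and coeff = 1 the identity lower bound, so the parameterised SBT machinery can tune
// coeff continuously between the two choices the standard DeepPoly heuristic picks
// from. For example:
//
//     double l = -3, u = 5, coeff = 0.4;
//     for ( double x : { l, 0.0, u } )
//         ASSERT( coeff * x <= ( x > 0 ? x : 0.0 ) );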
+ _symbolicLb[i] = coeff; + _symbolicLowerBias[i] = 0; + _lb[i] = coeff * sourceLb; + } } log( Stringf( "Neuron%u LB: %f b + %f, UB: %f b + %f", i, @@ -114,9 +167,26 @@ void DeepPolyReLUElement::execute( const Map &deepP _symbolicUpperBias[i] ) ); log( Stringf( "Neuron%u LB: %f, UB: %f", i, _lb[i], _ub[i] ) ); } + + if ( _storePredecessorSymbolicBounds ) + { + storePredecessorSymbolicBounds(); + } + log( "Executing - done" ); } +void DeepPolyReLUElement::storePredecessorSymbolicBounds() +{ + for ( unsigned i = 0; i < _size; ++i ) + { + ( *_predecessorSymbolicLb )[_layerIndex][i] = _symbolicLb[i]; + ( *_predecessorSymbolicUb )[_layerIndex][i] = _symbolicUb[i]; + ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = _symbolicLowerBias[i]; + ( *_predecessorSymbolicUpperBias )[_layerIndex][i] = _symbolicUpperBias[i]; + } +} + void DeepPolyReLUElement::symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, double *symbolicLowerBias, diff --git a/src/nlr/DeepPolyReLUElement.h b/src/nlr/DeepPolyReLUElement.h index 69b6b0ccdd..2a7bbc0be7 100644 --- a/src/nlr/DeepPolyReLUElement.h +++ b/src/nlr/DeepPolyReLUElement.h @@ -33,6 +33,8 @@ class DeepPolyReLUElement : public DeepPolyElement void execute( const Map &deepPolyElementsBefore ); + void storePredecessorSymbolicBounds(); + void symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, double *symbolicLowerBias, diff --git a/src/nlr/DeepPolyRoundElement.cpp b/src/nlr/DeepPolyRoundElement.cpp index fbb9565f61..fbfb09afa7 100644 --- a/src/nlr/DeepPolyRoundElement.cpp +++ b/src/nlr/DeepPolyRoundElement.cpp @@ -82,9 +82,26 @@ void DeepPolyRoundElement::execute( const Map &deep _symbolicUpperBias[i] ) ); log( Stringf( "Neuron%u LB: %f, UB: %f", i, _lb[i], _ub[i] ) ); } + + if ( _storePredecessorSymbolicBounds ) + { + storePredecessorSymbolicBounds(); + } + log( "Executing - done" ); } +void DeepPolyRoundElement::storePredecessorSymbolicBounds() +{ + for ( unsigned i = 0; i < _size; ++i ) + { + ( *_predecessorSymbolicLb )[_layerIndex][i] = _symbolicLb[i]; + ( *_predecessorSymbolicUb )[_layerIndex][i] = _symbolicUb[i]; + ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = _symbolicLowerBias[i]; + ( *_predecessorSymbolicUpperBias )[_layerIndex][i] = _symbolicUpperBias[i]; + } +} + void DeepPolyRoundElement::symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, diff --git a/src/nlr/DeepPolyRoundElement.h b/src/nlr/DeepPolyRoundElement.h index b213c97e0e..d82695e59e 100644 --- a/src/nlr/DeepPolyRoundElement.h +++ b/src/nlr/DeepPolyRoundElement.h @@ -33,6 +33,8 @@ class DeepPolyRoundElement : public DeepPolyElement void execute( const Map &deepPolyElementsBefore ); + void storePredecessorSymbolicBounds(); + void symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, double *symbolicLowerBias, diff --git a/src/nlr/DeepPolySigmoidElement.cpp b/src/nlr/DeepPolySigmoidElement.cpp index 0aee3e1889..1f290b0210 100644 --- a/src/nlr/DeepPolySigmoidElement.cpp +++ b/src/nlr/DeepPolySigmoidElement.cpp @@ -99,9 +99,26 @@ void DeepPolySigmoidElement::execute( _symbolicUpperBias[i] ) ); log( Stringf( "Neuron%u LB: %f, UB: %f", i, _lb[i], _ub[i] ) ); } + + if ( _storePredecessorSymbolicBounds ) + { + storePredecessorSymbolicBounds(); + } + log( "Executing - done" ); } +void DeepPolySigmoidElement::storePredecessorSymbolicBounds() +{ + for ( unsigned i = 0; i < _size; ++i ) + { + ( *_predecessorSymbolicLb )[_layerIndex][i] = _symbolicLb[i]; + ( 
*_predecessorSymbolicUb )[_layerIndex][i] = _symbolicUb[i]; + ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = _symbolicLowerBias[i]; + ( *_predecessorSymbolicUpperBias )[_layerIndex][i] = _symbolicUpperBias[i]; + } +} + void DeepPolySigmoidElement::symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, diff --git a/src/nlr/DeepPolySigmoidElement.h b/src/nlr/DeepPolySigmoidElement.h index 3c9d090f95..b93255c033 100644 --- a/src/nlr/DeepPolySigmoidElement.h +++ b/src/nlr/DeepPolySigmoidElement.h @@ -33,6 +33,8 @@ class DeepPolySigmoidElement : public DeepPolyElement void execute( const Map &deepPolyElementsBefore ); + void storePredecessorSymbolicBounds(); + void symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, double *symbolicLowerBias, diff --git a/src/nlr/DeepPolySignElement.cpp b/src/nlr/DeepPolySignElement.cpp index 200b6e40f9..432f04e1c4 100644 --- a/src/nlr/DeepPolySignElement.cpp +++ b/src/nlr/DeepPolySignElement.cpp @@ -46,47 +46,103 @@ void DeepPolySignElement::execute( const Map &deepP double sourceLb = predecessor->getLowerBound( sourceIndex._neuron ); double sourceUb = predecessor->getUpperBound( sourceIndex._neuron ); - if ( !FloatUtils::isNegative( sourceLb ) ) + if ( !_useParameterisedSBT ) { - // Phase positive - // Symbolic bound: 1 <= x_f <= 1 - // Concrete bound: 1 <= x_f <= 1 - _symbolicUb[i] = 0; - _symbolicUpperBias[i] = 1; - _ub[i] = 1; - - _symbolicLb[i] = 0; - _symbolicLowerBias[i] = 1; - _lb[i] = 1; - } - else if ( FloatUtils::isNegative( sourceUb ) ) - { - // Phase negative - // Symbolic bound: -1 <= x_f <= -1 - // Concrete bound: -1 <= x_f <= -1 - _symbolicUb[i] = 0; - _symbolicUpperBias[i] = -1; - _ub[i] = -1; - - _symbolicLb[i] = 0; - _symbolicLowerBias[i] = -1; - _lb[i] = -1; + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // Phase positive + // Symbolic bound: 1 <= x_f <= 1 + // Concrete bound: 1 <= x_f <= 1 + _symbolicUb[i] = 0; + _symbolicUpperBias[i] = 1; + _ub[i] = 1; + + _symbolicLb[i] = 0; + _symbolicLowerBias[i] = 1; + _lb[i] = 1; + } + else if ( FloatUtils::isNegative( sourceUb ) ) + { + // Phase negative + // Symbolic bound: -1 <= x_f <= -1 + // Concrete bound: -1 <= x_f <= -1 + _symbolicUb[i] = 0; + _symbolicUpperBias[i] = -1; + _ub[i] = -1; + + _symbolicLb[i] = 0; + _symbolicLowerBias[i] = -1; + _lb[i] = -1; + } + else + { + // Sign not fixed + // Use the relaxation defined in https://arxiv.org/pdf/2011.02948.pdf + // Symbolic upper bound: x_f <= -2 / l * x_b + 1 + // Concrete upper bound: x_f <= 1 + _symbolicUb[i] = -2 / sourceLb; + _symbolicUpperBias[i] = 1; + _ub[i] = 1; + + // Symbolic lower bound: x_f >= (2 / u) * x_b - 1 + // Concrete lower bound: x_f >= -1 + _symbolicLb[i] = 2 / sourceUb; + _symbolicLowerBias[i] = -1; + _lb[i] = -1; + } } else { - // Sign not fixed - // Use the relaxation defined in https://arxiv.org/pdf/2011.02948.pdf - // Symbolic upper bound: x_f <= -2 / l * x_b + 1 - // Concrete upper bound: x_f <= 1 - _symbolicUb[i] = -2 / sourceLb; - _symbolicUpperBias[i] = 1; - _ub[i] = 1; - - // Symbolic lower bound: x_f >= (2 / u) * x_b - 1 - // Concrete lower bound: x_f >= -1 - _symbolicLb[i] = 2 / sourceUb; - _symbolicLowerBias[i] = -1; - _lb[i] = -1; + Vector coeffs = ( *_layerIndicesToParameters )[_layerIndex]; + ASSERT( coeffs.size() == 2 ); + ASSERT( coeffs[0] >= 0 && coeffs[0] <= 1 ); + ASSERT( coeffs[1] >= 0 && coeffs[1] <= 1 ); + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // Phase positive + // Symbolic bound: 1 <= x_f <= 1 + // Concrete 
bound: 1 <= x_f <= 1 + _symbolicUb[i] = 0; + _symbolicUpperBias[i] = 1; + _ub[i] = 1; + + _symbolicLb[i] = 0; + _symbolicLowerBias[i] = 1; + _lb[i] = 1; + } + else if ( FloatUtils::isNegative( sourceUb ) ) + { + // Phase negative + // Symbolic bound: -1 <= x_f <= -1 + // Concrete bound: -1 <= x_f <= -1 + _symbolicUb[i] = 0; + _symbolicUpperBias[i] = -1; + _ub[i] = -1; + + _symbolicLb[i] = 0; + _symbolicLowerBias[i] = -1; + _lb[i] = -1; + } + else + { + // Sign not fixed + // For the upper bound, use the parameterised + // parallelogram approximation: y <= - 2 / l * coeffs[0] * x + 1 + // (varies continuously between y <= 1 and y <= -2 / l * x + 1). + // Concrete upper bound: x_f <= 1 + _symbolicUb[i] = -2.0 / sourceLb * coeffs[0]; + _symbolicUpperBias[i] = 1; + _ub[i] = 1; + + // For the lower bound, use the parameterised + // parallelogram approximation: y >= 2 / u * coeffs[1] * x - 1 + // (varies continuously between y >= -1 and y >= 2 / u * x - 1). + // Symbolic lower bound: x_f >= (2 / u) * coeffs[1] * x_b - 1 + // Concrete lower bound: x_f >= -1 + _symbolicLb[i] = 2.0 / sourceUb * coeffs[1]; + _symbolicLowerBias[i] = -1; + _lb[i] = -1; + } } log( Stringf( "Neuron%u LB: %f b + %f, UB: %f b + %f", i, @@ -96,9 +152,26 @@ void DeepPolySignElement::execute( const Map &deepP _symbolicUpperBias[i] ) ); log( Stringf( "Neuron%u LB: %f, UB: %f", i, _lb[i], _ub[i] ) ); } + + if ( _storePredecessorSymbolicBounds ) + { + storePredecessorSymbolicBounds(); + } + log( "Executing - done" ); } +void DeepPolySignElement::storePredecessorSymbolicBounds() +{ + for ( unsigned i = 0; i < _size; ++i ) + { + ( *_predecessorSymbolicLb )[_layerIndex][i] = _symbolicLb[i]; + ( *_predecessorSymbolicUb )[_layerIndex][i] = _symbolicUb[i]; + ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = _symbolicLowerBias[i]; + ( *_predecessorSymbolicUpperBias )[_layerIndex][i] = _symbolicUpperBias[i]; + } +} + void DeepPolySignElement::symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, double *symbolicLowerBias, diff --git a/src/nlr/DeepPolySignElement.h b/src/nlr/DeepPolySignElement.h index 53eb9edd4a..e699391806 100644 --- a/src/nlr/DeepPolySignElement.h +++ b/src/nlr/DeepPolySignElement.h @@ -33,6 +33,8 @@ class DeepPolySignElement : public DeepPolyElement void execute( const Map &deepPolyElementsBefore ); + void storePredecessorSymbolicBounds(); + void symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, double *symbolicLowerBias, diff --git a/src/nlr/DeepPolySoftmaxElement.cpp b/src/nlr/DeepPolySoftmaxElement.cpp index 2b6acb9c1c..d6d8713fd5 100644 --- a/src/nlr/DeepPolySoftmaxElement.cpp +++ b/src/nlr/DeepPolySoftmaxElement.cpp @@ -89,8 +89,8 @@ void DeepPolySoftmaxElement::execute( } } - double lb = linearLowerBound( sourceLbs, sourceUbs, index ); - double ub = linearUpperBound( sourceLbs, sourceUbs, index ); + double lb = Layer::linearLowerBound( sourceLbs, sourceUbs, index ); + double ub = Layer::linearUpperBound( sourceLbs, sourceUbs, index ); if ( lb > _lb[i] ) _lb[i] = lb; if ( ub < _ub[i] ) @@ -124,11 +124,11 @@ void DeepPolySoftmaxElement::execute( if ( !useLSE2 ) { _symbolicLowerBias[i] = - LSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); + Layer::LSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &sourceIndex : sources ) { - double dldj = - dLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + double dldj = Layer::dLSELowerBound( + sourceMids, sourceLbs, sourceUbs, index,
inputIndex ); _symbolicLb[_size * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; @@ -137,23 +137,24 @@ void DeepPolySoftmaxElement::execute( else { _symbolicLowerBias[i] = - LSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); + Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &sourceIndex : sources ) { - double dldj = - dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + double dldj = Layer::dLSELowerBound2( + sourceMids, sourceLbs, sourceUbs, index, inputIndex ); _symbolicLb[_size * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; } } - _symbolicUpperBias[i] = LSEUpperBound( sourceMids, targetLbs, targetUbs, index ); + _symbolicUpperBias[i] = + Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &sourceIndex : sources ) { - double dudj = - dLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + double dudj = Layer::dLSEUpperbound( + sourceMids, targetLbs, targetUbs, index, inputIndex ); _symbolicUb[_size * sourceIndex._neuron + i] = dudj; _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; ++inputIndex; @@ -161,23 +162,25 @@ void DeepPolySoftmaxElement::execute( } else if ( _boundType == SoftmaxBoundType::EXPONENTIAL_RECIPROCAL_DECOMPOSITION ) { - _symbolicLowerBias[i] = ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); + _symbolicLowerBias[i] = + Layer::ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); unsigned inputIndex = 0; for ( const auto &sourceIndex : sources ) { double dldj = - dERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + Layer::dERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); _symbolicLb[_size * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; } - _symbolicUpperBias[i] = ERUpperBound( sourceMids, targetLbs, targetUbs, index ); + _symbolicUpperBias[i] = + Layer::ERUpperBound( sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &sourceIndex : sources ) { double dudj = - dERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + Layer::dERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); _symbolicUb[_size * sourceIndex._neuron + i] = dudj; _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; ++inputIndex; @@ -185,9 +188,38 @@ void DeepPolySoftmaxElement::execute( } } } + + if ( _storePredecessorSymbolicBounds ) + { + storePredecessorSymbolicBounds(); + } + log( "Executing - done" ); } +void DeepPolySoftmaxElement::storePredecessorSymbolicBounds() +{ + for ( unsigned i = 0; i < _size; ++i ) + { + List sources = _layer->getActivationSources( i ); + unsigned inputIndex = 0; + for ( const auto &sourceIndex : sources ) + { + ( *_predecessorSymbolicLb )[_layerIndex][_size * inputIndex + i] = + _symbolicLb[_size * sourceIndex._neuron + i]; + ( *_predecessorSymbolicUb )[_layerIndex][_size * inputIndex + i] = + _symbolicUb[_size * sourceIndex._neuron + i]; + ++inputIndex; + } + } + + for ( unsigned i = 0; i < _size; ++i ) + { + ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = _symbolicLowerBias[i]; + ( *_predecessorSymbolicUpperBias )[_layerIndex][i] = _symbolicUpperBias[i]; + } +} + void DeepPolySoftmaxElement::symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, @@ -280,7 +312,6 @@ void DeepPolySoftmaxElement::symbolicBoundInTermsOfPredecessor( 
predecessor->getLayerIndex() ) ); } - void DeepPolySoftmaxElement::allocateMemory( unsigned maxLayerSize ) { freeMemoryIfNeeded(); @@ -334,317 +365,6 @@ void DeepPolySoftmaxElement::freeMemoryIfNeeded() } } -double DeepPolySoftmaxElement::LSELowerBound( const Vector &inputs, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) -{ - double sum = 0; - for ( unsigned j = 0; j < inputs.size(); ++j ) - { - double lj = inputLbs[j]; - double uj = inputUbs[j]; - double xj = inputs[j]; - sum += - ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); - } - - return std::exp( inputs[i] ) / sum; -} - -double DeepPolySoftmaxElement::dLSELowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) -{ - double val = 0; - if ( i == di ) - val += LSELowerBound( inputMids, inputLbs, inputUbs, i ); - - double ldi = inputLbs[di]; - double udi = inputUbs[di]; - - double sum = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - double lj = inputLbs[j]; - double uj = inputUbs[j]; - double xj = inputMids[j]; - - sum += - ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); - } - - val -= std::exp( inputMids[i] ) / ( sum * sum ) * ( std::exp( udi ) - std::exp( ldi ) ) / - ( udi - ldi ); - - return val; -} - -double DeepPolySoftmaxElement::LSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) -{ - double max = FloatUtils::negativeInfinity(); - unsigned maxInputIndex = 0; - unsigned index = 0; - for ( const auto &mid : inputMids ) - { - if ( mid > max ) - { - max = mid; - maxInputIndex = index; - } - ++index; - } - - if ( maxInputIndex == i ) - return ERLowerBound( inputMids, inputLbs, inputUbs, i ); - else - { - double sum = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - if ( j == maxInputIndex ) - sum += 1; - else - { - double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; - double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; - double xjjstar = inputMids[j] - inputMids[maxInputIndex]; - - sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + - ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); - } - } - - return std::exp( inputMids[i] - inputMids[maxInputIndex] ) / sum; - } -} - -double DeepPolySoftmaxElement::dLSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) -{ - double max = FloatUtils::negativeInfinity(); - unsigned maxInputIndex = 0; - unsigned index = 0; - for ( const auto &mid : inputMids ) - { - if ( mid > max ) - { - max = mid; - maxInputIndex = index; - } - ++index; - } - - if ( maxInputIndex == i ) - return dERLowerBound( inputMids, inputLbs, inputUbs, i, di ); - else - { - double val = LSELowerBound2( inputMids, inputLbs, inputUbs, i ); - - double sum = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - if ( j == maxInputIndex ) - sum += 1; - else - { - double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; - double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; - double xjjstar = inputMids[j] - inputMids[maxInputIndex]; - sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + - ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); - } - } - double val2 = std::exp( inputMids[i] - inputMids[maxInputIndex] ) / ( sum * sum ); - - if ( i == di ) - { - double ldijstar = inputLbs[i] - inputUbs[maxInputIndex]; - double 
udijstar = inputUbs[i] - inputLbs[maxInputIndex]; - return val - - val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / ( udijstar - ldijstar ); - } - else if ( maxInputIndex == di ) - { - double sum2 = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - if ( j == maxInputIndex ) - continue; - else - { - double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; - double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; - sum2 += ( std::exp( ujjstar ) - std::exp( ljjstar ) ) / ( ujjstar - ljjstar ); - } - } - return -val + val2 * sum2; - } - else - { - double ldijstar = inputLbs[di] - inputUbs[maxInputIndex]; - double udijstar = inputUbs[di] - inputLbs[maxInputIndex]; - return -val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / - ( udijstar - ldijstar ); - } - } -} - -double DeepPolySoftmaxElement::LSEUpperBound( const Vector &inputs, - const Vector &outputLb, - const Vector &outputUb, - unsigned i ) -{ - double li = outputLb[i]; - double ui = outputUb[i]; - - Vector inputTilda; - SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); - - return ( ( li * std::log( ui ) - ui * std::log( li ) ) / ( std::log( ui ) - std::log( li ) ) - - ( ui - li ) / ( std::log( ui ) - std::log( li ) ) * - SoftmaxConstraint::logSumOfExponential( inputTilda ) ); -} - -double DeepPolySoftmaxElement::dLSEUpperbound( const Vector &inputMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned i, - unsigned di ) -{ - double li = outputLb[i]; - double ui = outputUb[i]; - - double val = -( ui - li ) / ( std::log( ui ) - std::log( li ) ); - - double val2 = std::exp( inputMids[di] ) / SoftmaxConstraint::sumOfExponential( inputMids ); - if ( i == di ) - val2 -= 1; - - return val * val2; -} - -double DeepPolySoftmaxElement::ERLowerBound( const Vector &inputs, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) -{ - Vector inputTilda; - SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); - - double sum = 0; - for ( unsigned j = 0; j < inputs.size(); ++j ) - { - if ( i == j ) - sum += 1; - else - { - double ljTilda = inputLbs[j] - inputUbs[i]; - double ujTilda = inputUbs[j] - inputLbs[i]; - double xjTilda = inputTilda[j]; - - sum += ( ujTilda - xjTilda ) / ( ujTilda - ljTilda ) * std::exp( ljTilda ) + - ( xjTilda - ljTilda ) / ( ujTilda - ljTilda ) * std::exp( ujTilda ); - } - } - - return 1 / sum; -} - -double DeepPolySoftmaxElement::dERLowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) -{ - double val = ERLowerBound( inputMids, inputLbs, inputUbs, i ); - - if ( i != di ) - { - double ldiTilda = inputLbs[di] - inputUbs[i]; - double udiTilda = inputUbs[di] - inputLbs[i]; - return -val * val * ( std::exp( udiTilda ) - std::exp( ldiTilda ) ) / - ( udiTilda - ldiTilda ); - } - else - { - double val2 = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - if ( j != i ) - { - double ljTilda = inputLbs[j] - inputUbs[i]; - double ujTilda = inputUbs[j] - inputLbs[i]; - val2 += ( std::exp( ujTilda ) - std::exp( ljTilda ) ) / ( ujTilda - ljTilda ); - } - } - return val * val * val2; - } -} - -double DeepPolySoftmaxElement::ERUpperBound( const Vector &inputs, - const Vector &outputLb, - const Vector &outputUb, - unsigned i ) -{ - double li = outputLb[i]; - double ui = outputUb[i]; - - Vector inputTilda; - SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); - - return ui + li - ui * li * SoftmaxConstraint::sumOfExponential( inputTilda ); -} - -double DeepPolySoftmaxElement::dERUpperBound( const 
Vector &inputMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned i, - unsigned di ) -{ - double li = outputLb[i]; - double ui = outputUb[i]; - - - if ( i == di ) - { - double val2 = -1; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - val2 += std::exp( inputMids[j] - inputMids[i] ); - return li * ui * val2; - } - else - return -li * ui * std::exp( inputMids[di] - inputMids[i] ); -} - -double DeepPolySoftmaxElement::linearLowerBound( const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) -{ - Vector uTilda; - SoftmaxConstraint::xTilda( inputUbs, inputLbs[i], uTilda ); - uTilda[i] = 0; - return 1 / SoftmaxConstraint::sumOfExponential( uTilda ); -} - -double DeepPolySoftmaxElement::linearUpperBound( const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) -{ - Vector lTilda; - SoftmaxConstraint::xTilda( inputLbs, inputUbs[i], lTilda ); - lTilda[i] = 0; - return 1 / SoftmaxConstraint::sumOfExponential( lTilda ); -} - void DeepPolySoftmaxElement::log( const String &message ) { if ( GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING ) diff --git a/src/nlr/DeepPolySoftmaxElement.h b/src/nlr/DeepPolySoftmaxElement.h index a7b2220220..ee004a64a2 100644 --- a/src/nlr/DeepPolySoftmaxElement.h +++ b/src/nlr/DeepPolySoftmaxElement.h @@ -33,6 +33,9 @@ class DeepPolySoftmaxElement : public DeepPolyElement ~DeepPolySoftmaxElement(); void execute( const Map &deepPolyElements ); + + void storePredecessorSymbolicBounds(); + void symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, double *symbolicLowerBias, @@ -42,61 +45,6 @@ class DeepPolySoftmaxElement : public DeepPolyElement unsigned targetLayerSize, DeepPolyElement *predecessor ); - // The following methods compute concrete softmax output bounds - // using different linear approximation, as well as the coefficients - // of softmax inputs in the symbolic bounds - static double LSELowerBound( const Vector &sourceMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned outputIndex ); - static double dLSELowerBound( const Vector &sourceMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned outputIndex, - unsigned inputIndex ); - static double LSELowerBound2( const Vector &sourceMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned outputIndex ); - static double dLSELowerBound2( const Vector &sourceMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned outputIndex, - unsigned inputIndex ); - static double LSEUpperBound( const Vector &sourceMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned outputIndex ); - static double dLSEUpperbound( const Vector &sourceMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned outputIndex, - unsigned inputIndex ); - static double ERLowerBound( const Vector &sourceMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned outputIndex ); - static double dERLowerBound( const Vector &sourceMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned outputIndex, - unsigned inputIndex ); - static double ERUpperBound( const Vector &sourceMids, - const Vector &outputLbs, - const Vector &outputUbs, - unsigned outputIndex ); - static double dERUpperBound( const Vector &sourceMids, - const Vector &outputLbs, - const Vector &outputUbs, - unsigned outputIndex, - unsigned inputIndex ); - static double linearLowerBound( const Vector &outputLbs, - const Vector &outputUbs, - unsigned outputIndex ); - static double linearUpperBound( const Vector &outputLbs, - const 
Vector &outputUbs, - unsigned outputIndex ); - private: SoftmaxBoundType _boundType; unsigned _maxLayerSize; diff --git a/src/nlr/DeepPolyWeightedSumElement.cpp b/src/nlr/DeepPolyWeightedSumElement.cpp index 31a5c297b0..19c0f99fcd 100644 --- a/src/nlr/DeepPolyWeightedSumElement.cpp +++ b/src/nlr/DeepPolyWeightedSumElement.cpp @@ -102,6 +102,19 @@ void DeepPolyWeightedSumElement::computeBoundWithBackSubstitution( _workSymbolicUpperBias, currentElement, deepPolyElementsBefore ); + + if ( _storeOutputSymbolicBounds ) + { + precedingElement->storeOutputSymbolicBounds( _work1SymbolicLb, + _work1SymbolicUb, + _workSymbolicLowerBias, + _workSymbolicUpperBias, + _residualLb, + _residualUb, + _residualLayerIndices, + deepPolyElementsBefore ); + } + log( Stringf( "Computing symbolic bounds with respect to layer %u - done", predecessorIndex ) ); while ( currentElement->hasPredecessor() || !_residualLayerIndices.empty() ) @@ -229,6 +242,18 @@ void DeepPolyWeightedSumElement::computeBoundWithBackSubstitution( std::fill_n( _residualLb[newCurrentIndex], currentMatrixSize, 0 ); std::fill_n( _residualUb[newCurrentIndex], currentMatrixSize, 0 ); } + + if ( _storeOutputSymbolicBounds ) + { + precedingElement->storeOutputSymbolicBounds( _work1SymbolicLb, + _work1SymbolicUb, + _workSymbolicLowerBias, + _workSymbolicUpperBias, + _residualLb, + _residualUb, + _residualLayerIndices, + deepPolyElementsBefore ); + } } ASSERT( _residualLayerIndices.empty() ); log( "Computing bounds with back substitution - done" ); @@ -372,7 +397,6 @@ void DeepPolyWeightedSumElement::concretizeSymbolicBoundForSourceLayer( } } - void DeepPolyWeightedSumElement::symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, diff --git a/src/nlr/DeepPolyWeightedSumElement.h b/src/nlr/DeepPolyWeightedSumElement.h index 0f8ca74ef3..447017f640 100644 --- a/src/nlr/DeepPolyWeightedSumElement.h +++ b/src/nlr/DeepPolyWeightedSumElement.h @@ -77,6 +77,13 @@ class DeepPolyWeightedSumElement : public DeepPolyElement const double *symbolicUpperBias, DeepPolyElement *sourceElement ); + void + storeOutputSymbolicBounds( unsigned sourceLayerSize, + Map &residualLb, + Map &residualUb, + Set &residualLayerIndices, + const Map &deepPolyElementsBefore ); + void allocateMemoryForResidualsIfNeeded( unsigned residualLayerIndex, unsigned residualLayerSize ); void allocateMemory(); diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp index c7619f4d1c..1649b21a24 100644 --- a/src/nlr/LPFormulator.cpp +++ b/src/nlr/LPFormulator.cpp @@ -1,8 +1,8 @@ /********************* */ -/*! \file NetworkLevelReasoner.cpp +/*! \file LPFormulator.cpp ** \verbatim ** Top contributors (to current version): - ** Guy Katz + ** Guy Katz, Ido Shmuel ** This file is part of the Marabou project. ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS ** in the top-level source directory) and their institutional affiliations. 
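For reference, the interval bounds that the Layer::linearLowerBound / Layer::linearUpperBound helpers now provide for a single softmax output (used above in place of the removed DeepPolySoftmaxElement statics) follow from softmax_i(x) = 1 / sum_j exp(x_j - x_i) and the fact that over a box each difference x_j - x_i lies in [lb_j - ub_i, ub_j - lb_i]. Below is a minimal standalone sketch of that computation, assuming only the standard library; the function names softmaxLinearLowerBound / softmaxLinearUpperBound are illustrative placeholders, not Marabou API.

#include <cmath>
#include <cstdio>
#include <vector>

// Lower bound on softmax_i over [lb, ub]:
// softmax_i(x) >= 1 / ( 1 + sum_{j != i} exp( ub_j - lb_i ) ).
double softmaxLinearLowerBound( const std::vector<double> &lb,
                                const std::vector<double> &ub,
                                unsigned i )
{
    double sum = 1.0; // the j == i term contributes exp( 0 ) = 1
    for ( unsigned j = 0; j < lb.size(); ++j )
        if ( j != i )
            sum += std::exp( ub[j] - lb[i] );
    return 1.0 / sum;
}

// Upper bound, symmetrically:
// softmax_i(x) <= 1 / ( 1 + sum_{j != i} exp( lb_j - ub_i ) ).
double softmaxLinearUpperBound( const std::vector<double> &lb,
                                const std::vector<double> &ub,
                                unsigned i )
{
    double sum = 1.0;
    for ( unsigned j = 0; j < lb.size(); ++j )
        if ( j != i )
            sum += std::exp( lb[j] - ub[i] );
    return 1.0 / sum;
}

int main()
{
    // Hypothetical input box for a three-way softmax.
    std::vector<double> lb = { -1.0, 0.0, 0.5 };
    std::vector<double> ub = { 1.0, 0.5, 2.0 };
    printf( "softmax_0 in [%f, %f]\n",
            softmaxLinearLowerBound( lb, ub, 0 ),
            softmaxLinearUpperBound( lb, ub, 0 ) );
    return 0;
}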
@@ -248,8 +248,11 @@ void LPFormulator::optimizeBoundsWithIncrementalLpRelaxation( const Map &layers, - bool backward ) +void LPFormulator::optimizeBoundsWithLpRelaxation( + const Map &layers, + bool backward, + const Map> &layerIndicesToParameters, + const Vector &polygonalTightenings ) { unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS ); @@ -302,7 +305,8 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( const Map &solverToIndex ); // optimize every neuron of layer - optimizeBoundsOfNeuronsWithLpRlaxation( argument, backward ); + optimizeBoundsOfNeuronsWithLpRelaxation( + argument, backward, layerIndicesToParameters, polygonalTightenings ); LPFormulator_LOG( Stringf( "Tightening bound for layer %u - done", layerIndex ).ascii() ); } @@ -384,7 +388,7 @@ void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map> &layerIndicesToParameters, + const Vector &polygonalTightenings ) { unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS ); @@ -522,9 +530,17 @@ void LPFormulator::optimizeBoundsOfNeuronsWithLpRlaxation( ThreadArgument &args, mtx.lock(); if ( backward ) - createLPRelaxationAfter( layers, *freeSolver, lastIndexOfRelaxation ); + createLPRelaxationAfter( layers, + *freeSolver, + lastIndexOfRelaxation, + layerIndicesToParameters, + polygonalTightenings ); else - createLPRelaxation( layers, *freeSolver, lastIndexOfRelaxation ); + createLPRelaxation( layers, + *freeSolver, + lastIndexOfRelaxation, + layerIndicesToParameters, + polygonalTightenings ); mtx.unlock(); // spawn a thread to tighten the bounds for the current variable @@ -553,7 +569,6 @@ void LPFormulator::optimizeBoundsOfNeuronsWithLpRlaxation( ThreadArgument &args, } } - void LPFormulator::tightenSingleVariableBoundsWithLPRelaxation( ThreadArgument &argument ) { try @@ -645,22 +660,36 @@ void LPFormulator::tightenSingleVariableBoundsWithLPRelaxation( ThreadArgument & } } -void LPFormulator::createLPRelaxation( const Map &layers, - GurobiWrapper &gurobi, - unsigned lastLayer ) +void LPFormulator::createLPRelaxation( + const Map &layers, + GurobiWrapper &gurobi, + unsigned lastLayer, + const Map> &layerIndicesToParameters, + const Vector &polygonalTightenings ) { for ( const auto &layer : layers ) { - if ( layer.second->getLayerIndex() > lastLayer ) + unsigned currentLayerIndex = layer.second->getLayerIndex(); + if ( currentLayerIndex > lastLayer ) continue; - addLayerToModel( gurobi, layer.second, false ); + if ( layerIndicesToParameters.empty() ) + addLayerToModel( gurobi, layer.second, false ); + else + { + const Vector ¤tLayerCoeffs = layerIndicesToParameters[currentLayerIndex]; + addLayerToParameterisedModel( gurobi, layer.second, false, currentLayerCoeffs ); + } } + addPolyognalTighteningsToLpRelaxation( gurobi, layers, 0, lastLayer, polygonalTightenings ); } -void LPFormulator::createLPRelaxationAfter( const Map &layers, - GurobiWrapper &gurobi, - unsigned firstLayer ) +void LPFormulator::createLPRelaxationAfter( + const Map &layers, + GurobiWrapper &gurobi, + unsigned firstLayer, + const Map> &layerIndicesToParameters, + const Vector &polygonalTightenings ) { unsigned depth = GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH; std::priority_queue, std::greater> layersToAdd; @@ -678,7 +707,15 @@ void LPFormulator::createLPRelaxationAfter( const Map &layers continue; else { - addLayerToModel( gurobi, currentLayer, true ); + if ( layerIndicesToParameters.empty() ) + addLayerToModel( gurobi, currentLayer, true ); + else + { + const Vector ¤tLayerCoeffs = + 
layerIndicesToParameters[currentLayerIndex]; + addLayerToParameterisedModel( gurobi, currentLayer, true, currentLayerCoeffs ); + } + for ( const auto &nextLayer : currentLayer->getSuccessorLayers() ) { if ( layerToDepth.exists( nextLayer ) ) @@ -688,6 +725,8 @@ void LPFormulator::createLPRelaxationAfter( const Map &layers } } } + addPolyognalTighteningsToLpRelaxation( + gurobi, layers, firstLayer, layersToAdd.top(), polygonalTightenings ); } void LPFormulator::addLayerToModel( GurobiWrapper &gurobi, @@ -846,7 +885,6 @@ void LPFormulator::addReluLayerToLpRelaxation( GurobiWrapper &gurobi, } } - void LPFormulator::addRoundLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -911,7 +949,6 @@ void LPFormulator::addRoundLayerToLpRelaxation( GurobiWrapper &gurobi, } } - void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -977,9 +1014,7 @@ void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, double lb = std::max( 0.0, layer->getLb( i ) ); gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - /* - The phase of this AbsoluteValue is not yet fixed, 0 <= y <= max(-lb, ub). - */ + // The phase of this AbsoluteValue is not yet fixed, 0 <= y <= max(-lb, ub). // y >= 0 List terms; terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); @@ -994,7 +1029,6 @@ void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, } } - void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -1027,7 +1061,6 @@ void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, if ( createVariables && !gurobi.containsVariable( sourceName ) ) gurobi.addVariable( sourceName, sourceLb, sourceUb ); - double sourceUbSigmoid = SigmoidConstraint::sigmoid( sourceUb ); double sourceLbSigmoid = SigmoidConstraint::sigmoid( sourceLb ); @@ -1101,7 +1134,6 @@ void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, } } - void LPFormulator::addSignLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -1185,7 +1217,6 @@ void LPFormulator::addSignLayerToLpRelaxation( GurobiWrapper &gurobi, } } - void LPFormulator::addMaxLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -1229,7 +1260,6 @@ void LPFormulator::addMaxLayerToLpRelaxation( GurobiWrapper &gurobi, if ( createVariables && !gurobi.containsVariable( sourceName ) ) gurobi.addVariable( sourceName, sourceLb, sourceUb ); - // Target is at least source: target - source >= 0 terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); @@ -1267,7 +1297,6 @@ void LPFormulator::addMaxLayerToLpRelaxation( GurobiWrapper &gurobi, } } - void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -1317,11 +1346,9 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, } double ub = - std::min( DeepPolySoftmaxElement::linearUpperBound( sourceLbs, sourceUbs, index ), - layer->getUb( i ) ); + std::min( Layer::linearUpperBound( sourceLbs, sourceUbs, index ), layer->getUb( i ) ); double lb = - std::max( DeepPolySoftmaxElement::linearLowerBound( sourceLbs, sourceUbs, index ), - layer->getLb( i ) ); + std::max( Layer::linearLowerBound( sourceLbs, sourceUbs, index ), layer->getLb( i ) ); targetLbs[index] = lb; targetUbs[index] = ub; @@ -1330,8 +1357,6 @@ 
void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, double bias; SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType(); - - List terms; if ( FloatUtils::areEqual( lb, ub ) ) { @@ -1355,14 +1380,13 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, { terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::LSELowerBound( - sourceMids, sourceLbs, sourceUbs, index ); + bias = Layer::LSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = DeepPolySoftmaxElement::dLSELowerBound( + double dldj = Layer::dLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); @@ -1375,14 +1399,13 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, { terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::LSELowerBound2( - sourceMids, sourceLbs, sourceUbs, index ); + bias = Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = DeepPolySoftmaxElement::dLSELowerBound2( + double dldj = Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); @@ -1394,15 +1417,14 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::LSEUpperBound( - sourceMids, targetLbs, targetUbs, index ); + bias = Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dudj = DeepPolySoftmaxElement::dLSEUpperbound( + double dudj = Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex ); terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) ); bias -= dudj * sourceMids[inputIndex]; @@ -1414,16 +1436,15 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, { terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = - DeepPolySoftmaxElement::ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); + bias = Layer::ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); unsigned inputIndex = 0; for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = DeepPolySoftmaxElement::dERLowerBound( - sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + double dldj = + Layer::dERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", 
sourceVariable ) ) ); bias -= dldj * sourceMids[inputIndex]; ++inputIndex; @@ -1432,16 +1453,15 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = - DeepPolySoftmaxElement::ERUpperBound( sourceMids, targetLbs, targetUbs, index ); + bias = Layer::ERUpperBound( sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dudj = DeepPolySoftmaxElement::dERUpperBound( - sourceMids, targetLbs, targetUbs, index, inputIndex ); + double dudj = + Layer::dERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) ); bias -= dudj * sourceMids[inputIndex]; ++inputIndex; @@ -1464,20 +1484,21 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, List sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - Vector sourceLbs; Vector sourceUbs; Vector sourceValues; Vector sourceNeurons; + Vector sourceLayers; bool allConstant = true; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) ); + sourceLayers.append( sourceLayer ); sourceNeurons.append( sourceNeuron ); sourceLbs.append( sourceLb ); sourceUbs.append( sourceUb ); @@ -1526,10 +1547,10 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -sourceLbs[1], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + Stringf( "x%u", sourceLayers[0]->neuronToVariable( sourceNeurons[0] ) ) ) ); terms.append( GurobiWrapper::Term( -sourceLbs[0], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + Stringf( "x%u", sourceLayers[1]->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addGeqConstraint( terms, -sourceLbs[0] * sourceLbs[1] ); // Upper bound: out <= u_y * x + l_x * y - l_x * u_y @@ -1537,16 +1558,15 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -sourceUbs[1], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + Stringf( "x%u", sourceLayers[0]->neuronToVariable( sourceNeurons[0] ) ) ) ); terms.append( GurobiWrapper::Term( -sourceLbs[0], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + Stringf( "x%u", sourceLayers[1]->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addLeqConstraint( terms, -sourceLbs[0] * sourceUbs[1] ); } } } - void LPFormulator::addWeightedSumLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -1668,7 +1688,7 @@ void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi, else { double width = sourceUb - sourceLb; - double coeff = ( sourceUb - slope * sourceLb ) / width; + double weight = ( sourceUb - 
slope * sourceLb ) / width; double bias = ( ( slope - 1 ) * sourceUb * sourceLb ) / width; /* @@ -1691,15 +1711,504 @@ void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi, terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); gurobi.addGeqConstraint( terms, 0 ); + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -weight, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, bias ); + } + } + } +} + +void LPFormulator::addLayerToParameterisedModel( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + const Vector &coeffs ) +{ + switch ( layer->getLayerType() ) + { + case Layer::RELU: + addReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeffs ); + break; + + case Layer::LEAKY_RELU: + addLeakyReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeffs ); + break; + + case Layer::SIGN: + addSignLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeffs ); + break; + + case Layer::BILINEAR: + addBilinearLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeffs ); + break; + + default: + addLayerToModel( gurobi, layer, createVariables ); + break; + } +} + +void LPFormulator::addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + const Vector &coeffs ) +{ + double coeff = coeffs[0]; + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = sourceValue > 0 ? sourceValue : 0; + + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + gurobi.addVariable( Stringf( "x%u", targetVariable ), 0, layer->getUb( i ) ); + + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // The ReLU is active, y = x + if ( sourceLb < 0 ) + sourceLb = 0; + + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addEqConstraint( terms, 0 ); + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + // The ReLU is inactive, y = 0 + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addEqConstraint( terms, 0 ); + } + else + { + /* + The phase of this ReLU is not yet fixed. + + For y = ReLU(x), we add the following relaxation: + + 1. y >= 0 + 2. y >= x + 3. y >= coeff * x + 4.
y is below the line that crosses (x.lb,0) and (x.ub,x.ub) + */ + + // y >= 0 + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + // y >= x, i.e. y - x >= 0. + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + // y >= coeff * x, i.e. y - coeff * x >= 0 (varies continuously between y >= 0 and + // y >= x). terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -coeff, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + /* + u ul + y <= ----- x - ----- + u - l u - l + */ + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -sourceUb / ( sourceUb - sourceLb ), + Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, + ( -sourceUb * sourceLb ) / ( sourceUb - sourceLb ) ); + } + } + } +} + +void LPFormulator::addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + const Vector &coeffs ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( layer->neuronEliminated( i ) ) + continue; + + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = FloatUtils::isNegative( sourceValue ) ? -1 : 1; + + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // The Sign is positive, y = 1 + gurobi.addVariable( Stringf( "x%u", targetVariable ), 1, 1 ); + } + else if ( FloatUtils::isNegative( sourceUb ) ) + { + // The Sign is negative, y = -1 + gurobi.addVariable( Stringf( "x%u", targetVariable ), -1, -1 ); + } + else + { + /* + The phase of this Sign is not yet fixed. + + For y = Sign(x), we add the following parallelogram relaxation: + + 1. y >= -1 + 2. y <= 1 + 3. y is below the line that crosses (x.lb,-1) and (0,1) + 4. y is above the line that crosses (0,-1) and (x.ub,1) + */ + + // -1 <= y <= 1 + gurobi.addVariable( Stringf( "x%u", targetVariable ), -1, 1 ); + + /* + 2 + y <= ----- * coeffs[0] * x + 1 + - l + Varies continuously between y <= 1 and y <= -2/l * x + 1. + */ + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( 2.0 / sourceLb * coeffs[0], + Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, 1 ); + + /* + 2 + y >= ----- * coeffs[1] * x - 1 + u + Varies continuously between y >= -1 and y >= 2/u * x - 1.
+ */ + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -2.0 / sourceUb * coeffs[1], + Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, -1 ); + } + } +} + +void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + const Vector &coeffs ) +{ + double slope = layer->getAlpha(); + double coeff = coeffs[0]; + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = sourceValue > 0 ? sourceValue : slope * sourceValue; + + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + gurobi.addVariable( + Stringf( "x%u", targetVariable ), layer->getLb( i ), layer->getUb( i ) ); + + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // The LeakyReLU is active, y = x + + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addEqConstraint( terms, 0 ); + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + // The LeakyReLU is inactive, y = alpha * x + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -slope, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addEqConstraint( terms, 0 ); + } + else + { + double width = sourceUb - sourceLb; + double weight = ( sourceUb - slope * sourceLb ) / width; + double bias = ( ( slope - 1 ) * sourceUb * sourceLb ) / width; + + /* + The phase of this LeakyReLU is not yet fixed. + For y = LeakyReLU(x), we add the following relaxation: + 1. y >= ((1 - alpha) * coeff + alpha) * x (varies continuously between y >= + alpha * x and y >= x). + 2. y >= x + 3. y >= alpha * x + 4. y is below the line that crosses (x.lb,0) and (x.ub,x.ub) + */ + + // y >= ((1 - alpha) * coeff + alpha) * x + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -slope - ( 1 - slope ) * coeff, + Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + // y >= x, i.e. y - x >= 0 + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + // y >= alpha * x, i.e.
y - alpha * x >= 0 + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -slope, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -weight, Stringf( "x%u", sourceVariable ) ) ); gurobi.addLeqConstraint( terms, bias ); } } } } +void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + const Vector &coeffs ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + + Vector sourceLbs; + Vector sourceUbs; + Vector sourceValues; + Vector sourceNeurons; + Vector sourceLayers; + bool allConstant = true; + for ( const auto &sourceIndex : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) ); + + sourceLayers.append( sourceLayer ); + sourceNeurons.append( sourceNeuron ); + sourceLbs.append( sourceLb ); + sourceUbs.append( sourceUb ); + + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) + { + allConstant = false; + } + else + { + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + sourceValues.append( sourceValue ); + } + } + + if ( allConstant ) + { + // If both source neurons have been eliminated, this neuron is constant + double targetValue = sourceValues[0] * sourceValues[1]; + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + continue; + } + + double lb = FloatUtils::infinity(); + double ub = FloatUtils::negativeInfinity(); + List values = { sourceLbs[0] * sourceLbs[1], + sourceLbs[0] * sourceUbs[1], + sourceUbs[0] * sourceLbs[1], + sourceUbs[0] * sourceUbs[1] }; + for ( const auto &v : values ) + { + if ( v < lb ) + lb = v; + if ( v > ub ) + ub = v; + } + + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); + + // Bilinear linear relaxation (arXiv:2405.21063v2 [cs.LG]) + // Lower bound: out >= a_l * x + b_l * y + c_l, where + // a_l = alpha1 * l_y + ( 1 - alpha1 ) * u_y + // b_l = alpha1 * l_x + ( 1 - alpha1 ) * u_x + // c_l = -alpha1 * l_x * l_y - ( 1 - alpha1 ) * u_x * u_y + + // Upper bound: out <= a_u * x + b_u * y + c_u, where + // a_u = alpha2 * u_y + ( 1 - alpha2 ) * l_y + // b_u = alpha2 * l_x + ( 1 - alpha2 ) * u_x + // c_u = -alpha2 * l_x * u_y - ( 1 - alpha2 ) * u_x * l_y + + List terms; + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( + -coeffs[0] * sourceLbs[1] - ( 1 - coeffs[0] ) * sourceUbs[1], + Stringf( "x%u", sourceLayers[0]->neuronToVariable( sourceNeurons[0] ) ) ) ); + terms.append( GurobiWrapper::Term( + -coeffs[0] * sourceLbs[0] - ( 1 - coeffs[0] ) * sourceUbs[0], + Stringf( "x%u", sourceLayers[1]->neuronToVariable( sourceNeurons[1] ) ) ) ); + gurobi.addGeqConstraint( terms, + -coeffs[0] * sourceLbs[0] * sourceLbs[1] - + ( 1 - coeffs[0] ) *
sourceUbs[0] * sourceUbs[1] ); + + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( + -coeffs[1] * sourceUbs[1] - ( 1 - coeffs[1] ) * sourceLbs[1], + Stringf( "x%u", sourceLayers[0]->neuronToVariable( sourceNeurons[0] ) ) ) ); + terms.append( GurobiWrapper::Term( + -coeffs[1] * sourceLbs[0] - ( 1 - coeffs[1] ) * sourceUbs[0], + Stringf( "x%u", sourceLayers[1]->neuronToVariable( sourceNeurons[1] ) ) ) ); + gurobi.addLeqConstraint( terms, + -coeffs[1] * sourceLbs[0] * sourceUbs[1] - + ( 1 - coeffs[1] ) * sourceUbs[0] * sourceLbs[1] ); + } + } +} + +void LPFormulator::addPolyognalTighteningsToLpRelaxation( + GurobiWrapper &gurobi, + const Map &layers, + unsigned firstLayer, + unsigned lastLayer, + const Vector &polygonalTightenings ) +{ + List terms; + for ( const auto &tightening : polygonalTightenings ) + { + Map neuronToCoefficient = tightening._neuronToCoefficient; + PolygonalTightening::PolygonalBoundType type = tightening._type; + double value = tightening._value; + + bool outOfBounds = false; + for ( const auto &pair : neuronToCoefficient ) + { + unsigned layerIndex = pair.first._layer; + if ( layerIndex < firstLayer || layerIndex > lastLayer ) + { + outOfBounds = true; + } + } + if ( outOfBounds ) + { + continue; + } + + terms.clear(); + for ( const auto &pair : neuronToCoefficient ) + { + unsigned layerIndex = pair.first._layer; + unsigned neuron = pair.first._neuron; + double coeff = pair.second; + Layer *layer = layers[layerIndex]; + + if ( !layer->neuronEliminated( neuron ) ) + { + const String variableName = Stringf( "x%u", layer->neuronToVariable( neuron ) ); + if ( !gurobi.containsVariable( variableName ) ) + { + gurobi.addVariable( + variableName, layer->getLb( neuron ), layer->getUb( neuron ) ); + } + terms.append( GurobiWrapper::Term( coeff, variableName ) ); + } + else + { + value -= coeff * layer->getEliminatedNeuronValue( neuron ); + } + } + + if ( type == PolygonalTightening::UB ) + { + gurobi.addLeqConstraint( terms, value ); + } + else + { + gurobi.addGeqConstraint( terms, value ); + } + } +} + void LPFormulator::setCutoff( double cutoff ) { _cutoffInUse = true; diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index cee0abbb65..b983a48267 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -2,7 +2,7 @@ /*! \file LPFormulator.h ** \verbatim ** Top contributors (to current version): - ** Guy Katz + ** Guy Katz, Ido Shmuel ** This file is part of the Marabou project. ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS ** in the top-level source directory) and their institutional affiliations. 
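A short soundness note on the parameterised bilinear planes used in addBilinearLayerToParameterisedLpRelaxation above (a sketch of the standard McCormick argument; \(\alpha_1\) and \(\alpha_2\) stand for coeffs[0] and coeffs[1]). For \(x \in [l_x, u_x]\) and \(y \in [l_y, u_y]\), the products \((x - l_x)(y - l_y)\), \((u_x - x)(u_y - y)\), \((x - l_x)(u_y - y)\) and \((u_x - x)(y - l_y)\) are all non-negative, which gives

\[
\begin{aligned}
xy &\ge l_y x + l_x y - l_x l_y, &\qquad xy &\ge u_y x + u_x y - u_x u_y,\\
xy &\le u_y x + l_x y - l_x u_y, &\qquad xy &\le l_y x + u_x y - u_x l_y.
\end{aligned}
\]

Any convex combination of the two lower planes with weight \(\alpha_1 \in [0,1]\) is again a valid lower plane, and likewise for the upper planes with \(\alpha_2\); expanding those combinations yields exactly the \(a_l, b_l, c_l\) and \(a_u, b_u, c_u\) expressions in the comment above, with \(\alpha = 1\) and \(\alpha = 0\) recovering the two plain McCormick planes.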
@@ -20,6 +20,7 @@ #include "LayerOwner.h" #include "Map.h" #include "ParallelSolver.h" +#include "PolygonalTightening.h" #include #include @@ -51,8 +52,13 @@ class LPFormulator : public ParallelSolver LP model is adjusted from the previous call, instead of being constructed from scratch */ - void optimizeBoundsWithLpRelaxation( const Map &layers, - bool backward = false ); + void + optimizeBoundsWithLpRelaxation( const Map &layers, + bool backward = false, + const Map> &layerIndicesToParameters = + Map>(), + const Vector &polygonalTightenings = + Vector( {} ) ); void optimizeBoundsOfOneLayerWithLpRelaxation( const Map &layers, unsigned targetIndex ); void optimizeBoundsWithIncrementalLpRelaxation( const Map &layers ); @@ -72,10 +78,18 @@ class LPFormulator : public ParallelSolver */ void createLPRelaxation( const Map &layers, GurobiWrapper &gurobi, - unsigned lastLayer = UINT_MAX ); + unsigned lastLayer = UINT_MAX, + const Map> &layerIndicesToParameters = + Map>(), + const Vector &polygonalTightenings = + Vector( {} ) ); void createLPRelaxationAfter( const Map &layers, GurobiWrapper &gurobi, - unsigned firstLayer ); + unsigned firstLayer, + const Map> &layerIndicesToParameters = + Map>(), + const Vector &polygonalTightenings = + Vector( {} ) ); double solveLPRelaxation( GurobiWrapper &gurobi, const Map &layers, MinOrMax minOrMax, @@ -127,7 +141,46 @@ class LPFormulator : public ParallelSolver const Layer *layer, bool createVariables ); - void optimizeBoundsOfNeuronsWithLpRlaxation( ThreadArgument &args, bool backward ); + void optimizeBoundsOfNeuronsWithLpRelaxation( + ThreadArgument &args, + bool backward, + const Map> &layerIndicesToParameters = + Map>(), + const Vector &polygonalTightenings = + Vector( {} ) ); + + // Create LP relaxations depending on external parameters. + void addLayerToParameterisedModel( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + const Vector &coeffs ); + + void addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + const Vector &coeffs ); + + void addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + const Vector &coeffs ); + + void addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + const Vector &coeffs ); + + void addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + const Vector &coeffs ); + + void addPolyognalTighteningsToLpRelaxation( + GurobiWrapper &gurobi, + const Map &layers, + unsigned firstLayer, + unsigned lastLayer, + const Vector &polygonalTightenings ); /* Optimize for the min/max value of variableName with respect to the constraints diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index 08e6900538..c0a3dc6abd 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -2,7 +2,7 @@ /*! \file Layer.cpp ** \verbatim ** Top contributors (to current version): - ** Guy Katz, Ido Shmuel + ** Guy Katz ** This file is part of the Marabou project. ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS ** in the top-level source directory) and their institutional affiliations. @@ -75,7 +75,11 @@ void Layer::allocateMemory() _inputLayerSize = ( _type == INPUT ) ? 
_size : _layerOwner->getLayer( 0 )->getSize(); if ( Options::get()->getSymbolicBoundTighteningType() == - SymbolicBoundTighteningType::SYMBOLIC_BOUND_TIGHTENING ) + SymbolicBoundTighteningType::SYMBOLIC_BOUND_TIGHTENING || + Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX || + Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR ) { _symbolicLb = new double[_size * _inputLayerSize]; _symbolicUb = new double[_size * _inputLayerSize]; @@ -926,23 +930,23 @@ void Layer::computeIntervalArithmeticBoundsForSign() double lb = sourceLayer->getLb( sourceIndex._neuron ); double ub = sourceLayer->getUb( sourceIndex._neuron ); - double new_lb; - double new_ub; + double newLb; + double newUb; if ( !FloatUtils::isNegative( lb ) ) { - new_lb = 1; - new_ub = 1; + newLb = 1; + newUb = 1; } else if ( FloatUtils::isNegative( ub ) ) { - new_lb = -1; - new_ub = -1; + newLb = -1; + newUb = -1; } else { - new_lb = -1; - new_ub = 1; + newLb = -1; + newUb = 1; } /* @@ -950,16 +954,16 @@ void Layer::computeIntervalArithmeticBoundsForSign() variable. If they are tigheter than what was previously known, store them. */ - if ( _lb[i] < new_lb ) + if ( _lb[i] < newLb ) { - _lb[i] = new_lb; + _lb[i] = newLb; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } - if ( _ub[i] > new_ub ) + if ( _ub[i] > newUb ) { - _ub[i] = new_ub; + _ub[i] = newUb; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); } @@ -1045,7 +1049,6 @@ void Layer::computeIntervalArithmeticBoundsForSigmoid() double lbSigmoid = SigmoidConstraint::sigmoid( lb ); double ubSigmoid = SigmoidConstraint::sigmoid( ub ); - if ( _lb[i] < lbSigmoid ) { _lb[i] = lbSigmoid; @@ -1061,7 +1064,6 @@ void Layer::computeIntervalArithmeticBoundsForSigmoid() } } - void Layer::computeIntervalArithmeticBoundsForRound() { for ( unsigned i = 0; i < _size; ++i ) @@ -1078,7 +1080,6 @@ void Layer::computeIntervalArithmeticBoundsForRound() double lbRound = FloatUtils::round( lb ); double ubRound = FloatUtils::round( ub ); - if ( _lb[i] < lbRound ) { _lb[i] = lbRound; @@ -1094,7 +1095,6 @@ void Layer::computeIntervalArithmeticBoundsForRound() } } - void Layer::computeIntervalArithmeticBoundsForMax() { for ( unsigned i = 0; i < _size; ++i ) @@ -1225,8 +1225,8 @@ void Layer::computeIntervalArithmeticBoundsForSoftmax() } } - double lb = softmaxLinearLowerBound( sourceLbs, sourceUbs, index ); - double ub = softmaxLinearUpperBound( sourceLbs, sourceUbs, index ); + double lb = linearLowerBound( sourceLbs, sourceUbs, index ); + double ub = linearUpperBound( sourceLbs, sourceUbs, index ); if ( _lb[i] < lb ) { _lb[i] = lb; @@ -1253,14 +1253,13 @@ void Layer::computeIntervalArithmeticBoundsForBilinear() List sources = getActivationSources( i ); ASSERT( sources.size() == 2 ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - Vector sourceLbs; Vector sourceUbs; Vector sourceValues; bool allConstant = true; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); @@ -1314,7 +1313,6 @@ void Layer::computeIntervalArithmeticBoundsForBilinear() ub = v; } - if ( _lb[i] < lb ) { _lb[i] = lb; @@ -1726,7 +1724,6 @@ void 
Layer::computeSymbolicBoundsForSign() for ( unsigned j = 0; j < _inputLayerSize; ++j ) _symbolicUb[j * _size + i] *= factor; - // Do the same for the bias, and then adjust _symbolicUpperBias[i] *= factor; _symbolicUpperBias[i] += 1; @@ -2047,20 +2044,19 @@ void Layer::computeSymbolicBoundsForLeakyRelu() // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) // Concrete upper bound: x_f <= ub_b double width = sourceUb - sourceLb; - double coeff = ( sourceUb - _alpha * sourceLb ) / width; + double weight = ( sourceUb - _alpha * sourceLb ) / width; if ( _alpha <= 1 ) { for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - _symbolicUb[j * _size + i] *= coeff; + _symbolicUb[j * _size + i] *= weight; } // Do the same for the bias, and then adjust - _symbolicUpperBias[i] *= coeff; + _symbolicUpperBias[i] *= weight; _symbolicUpperBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; - // For the lower bound, in general, x_f >= lambda * x_b, where // 0 <= lambda <= 1, would be a sound lower bound. We // use the heuristic described in section 4.1 of @@ -2092,11 +2088,11 @@ void Layer::computeSymbolicBoundsForLeakyRelu() { for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - _symbolicLb[j * _size + i] *= coeff; + _symbolicLb[j * _size + i] *= weight; } // Do the same for the bias, and then adjust - _symbolicLowerBias[i] *= coeff; + _symbolicLowerBias[i] *= weight; _symbolicLowerBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; if ( sourceUb > sourceLb ) @@ -2209,7 +2205,6 @@ void Layer::computeSymbolicBoundsForLeakyRelu() } } - void Layer::computeSymbolicBoundsForSigmoid() { std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); @@ -2269,7 +2264,7 @@ void Layer::computeSymbolicBoundsForSigmoid() double sourceLbSigmoid = SigmoidConstraint::sigmoid( sourceLb ); // Case when the Sigmoid constraint is fixed - if ( FloatUtils::areEqual( FloatUtils::round( sourceUb ), FloatUtils::round( sourceLb ) ) ) + if ( FloatUtils::areEqual( sourceLb, sourceUb ) ) { for ( unsigned j = 0; j < _inputLayerSize; ++j ) { @@ -2341,7 +2336,6 @@ void Layer::computeSymbolicBoundsForSigmoid() _symbolicUpperBias[i] += sourceUbSigmoid - lambdaPrime * sourceUb; } - /* We now have the symbolic representation for the current layer. 
Next, we compute new lower and upper bounds for @@ -2466,7 +2460,6 @@ void Layer::computeSymbolicBoundsForRound() _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); - // Bounds of lb, ub are the rounded values of source lb, ub double sourceUbRound = FloatUtils::round( sourceUb ); double sourceLbRound = FloatUtils::round( sourceLb ); @@ -2476,7 +2469,6 @@ void Layer::computeSymbolicBoundsForRound() _symbolicLbOfLb[i] = sourceLbRound; _symbolicUbOfLb[i] = sourceLbRound; - // Case when the Round constraint is fixed if ( FloatUtils::areEqual( FloatUtils::round( sourceUb ), FloatUtils::round( sourceLb ) ) ) { @@ -2610,7 +2602,6 @@ void Layer::computeSymbolicBoundsForMax() _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[indexOfMaxLowerBound._neuron]; - _symbolicLbOfLb[i] = maxLowerBound; _symbolicUbOfLb[i] = maxLowerBound; _symbolicLbOfUb[i] = sourceUbs[indexOfMaxLowerBound]; @@ -2706,7 +2697,6 @@ void Layer::computeSymbolicBoundsForSoftmax() Vector sourceMids; Vector targetLbs; Vector targetUbs; - unsigned len = 0; for ( const auto &sourceIndex : sources ) { unsigned sourceNeuron = sourceIndex._neuron; @@ -2718,8 +2708,6 @@ void Layer::computeSymbolicBoundsForSoftmax() sourceMids.append( ( sourceLb + sourceUb ) / 2 ); targetLbs.append( _lb[i] ); targetUbs.append( _ub[i] ); - - ++len; } // Find the index of i in the softmax @@ -2735,8 +2723,8 @@ void Layer::computeSymbolicBoundsForSoftmax() } } - double lb = softmaxLinearLowerBound( sourceLbs, sourceUbs, index ); - double ub = softmaxLinearUpperBound( sourceLbs, sourceUbs, index ); + double lb = linearLowerBound( sourceLbs, sourceUbs, index ); + double ub = linearUpperBound( sourceLbs, sourceUbs, index ); if ( _lb[i] < lb ) { _lb[i] = lb; @@ -2758,8 +2746,8 @@ void Layer::computeSymbolicBoundsForSoftmax() _symbolicUpperBias[i] = _ub[i]; for ( const auto &sourceIndex : sources ) { - symbolicLb[len * sourceIndex._neuron + i] = 0; - symbolicUb[len * sourceIndex._neuron + i] = 0; + symbolicLb[_size * sourceIndex._neuron + i] = 0; + symbolicUb[_size * sourceIndex._neuron + i] = 0; } } else @@ -2777,12 +2765,12 @@ void Layer::computeSymbolicBoundsForSoftmax() if ( !useLSE2 ) { _symbolicLowerBias[i] = - softmaxLSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); + LSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &sourceIndex : sources ) { - double dldj = softmaxdLSELowerBound( - sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - symbolicLb[len * sourceIndex._neuron + i] = dldj; + double dldj = + dLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + symbolicLb[_size * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; } @@ -2790,51 +2778,48 @@ void Layer::computeSymbolicBoundsForSoftmax() else { _symbolicLowerBias[i] = - softmaxLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); + LSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &sourceIndex : sources ) { - double dldj = softmaxdLSELowerBound2( - sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - symbolicLb[len * sourceIndex._neuron + i] = dldj; + double dldj = + dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + symbolicLb[_size * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; } } - _symbolicUpperBias[i] = - softmaxLSEUpperBound( sourceMids, targetLbs, targetUbs, index 
); + _symbolicUpperBias[i] = LSEUpperBound( sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &sourceIndex : sources ) { - double dudj = softmaxdLSEUpperbound( - sourceMids, targetLbs, targetUbs, index, inputIndex ); - symbolicUb[len * sourceIndex._neuron + i] = dudj; + double dudj = + dLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + symbolicUb[_size * sourceIndex._neuron + i] = dudj; _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; ++inputIndex; } } else if ( boundType == SoftmaxBoundType::EXPONENTIAL_RECIPROCAL_DECOMPOSITION ) { - _symbolicLowerBias[i] = - softmaxERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); + _symbolicLowerBias[i] = ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); unsigned inputIndex = 0; for ( const auto &sourceIndex : sources ) { double dldj = - softmaxdERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - symbolicLb[len * sourceIndex._neuron + i] = dldj; + dERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + symbolicLb[_size * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; } - _symbolicUpperBias[i] = - softmaxERUpperBound( sourceMids, targetLbs, targetUbs, index ); + _symbolicUpperBias[i] = ERUpperBound( sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &sourceIndex : sources ) { double dudj = - softmaxdERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); - symbolicUb[len * sourceIndex._neuron + i] = dudj; + dERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + symbolicUb[_size * sourceIndex._neuron + i] = dudj; _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; ++inputIndex; } @@ -3036,27 +3021,26 @@ void Layer::computeSymbolicBoundsForBilinear() List sources = getActivationSources( i ); ASSERT( sources.size() == 2 ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - - unsigned sourceLayerSize = sourceLayer->getSize(); - const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); - const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); - Vector sourceLbs; Vector sourceUbs; Vector sourceValues; + Vector sourceNeurons; + Vector sourceLayerSizes; + Vector sourceLayers; bool allConstant = true; - unsigned indexA = 0; - unsigned indexB = 0; - unsigned counter = 0; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); + unsigned sourceLayerSize = sourceLayer->getSize(); + sourceLayers.append( sourceLayer ); + sourceNeurons.append( sourceNeuron ); sourceLbs.append( sourceLb ); sourceUbs.append( sourceUb ); + sourceLayerSizes.append( sourceLayerSize ); if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) { @@ -3067,16 +3051,6 @@ void Layer::computeSymbolicBoundsForBilinear() double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); sourceValues.append( sourceValue ); } - - if ( counter == 0 ) - { - indexA = sourceIndex._neuron; - } - else - { - indexB = sourceIndex._neuron; - } - ++counter; } if ( allConstant ) @@ -3106,47 +3080,75 @@ void Layer::computeSymbolicBoundsForBilinear() // Symbolic upper bound: // out <= alpha * x + beta * y + gamma // where alpha = ub_y, beta = lb_x, gamma = -lb_x * ub_y + double aLower = sourceLbs[1]; + double aUpper = sourceUbs[1]; + double 
bLower = sourceLbs[0]; + double bUpper = sourceLbs[0]; + _symbolicLowerBias[i] = -sourceLbs[0] * sourceLbs[1]; + _symbolicUpperBias[i] = -sourceLbs[0] * sourceUbs[1]; + for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - if ( sourceLbs[1] >= 0 ) + if ( aLower >= 0 ) { _symbolicLb[j * _size + i] += - sourceLbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA]; + aLower * ( sourceLayers[0] + ->getSymbolicLb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicLowerBias[i] += aLower * ( sourceLayers[0]->getSymbolicLowerBias() )[0]; } else { _symbolicLb[j * _size + i] += - sourceLbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA]; + aLower * ( sourceLayers[0] + ->getSymbolicUb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicLowerBias[i] += aLower * ( sourceLayers[0]->getSymbolicUpperBias() )[0]; } - if ( sourceUbs[1] >= 0 ) + if ( aUpper >= 0 ) { _symbolicUb[j * _size + i] += - sourceUbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA]; + aUpper * ( sourceLayers[0] + ->getSymbolicUb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicUpperBias[i] += aUpper * ( sourceLayers[0]->getSymbolicUpperBias() )[0]; } else { - _symbolicLb[j * _size + i] += - sourceUbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA]; + _symbolicUb[j * _size + i] += + aUpper * ( sourceLayers[0] + ->getSymbolicLb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicUpperBias[i] += aUpper * ( sourceLayers[0]->getSymbolicLowerBias() )[0]; } - if ( sourceLbs[0] >= 0 ) + if ( bLower >= 0 ) { _symbolicLb[j * _size + i] += - sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB]; - _symbolicUb[j * _size + i] += - sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB]; + bLower * ( sourceLayers[1] + ->getSymbolicLb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bLower * ( sourceLayers[1]->getSymbolicLowerBias() )[1]; } else { _symbolicLb[j * _size + i] += - sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB]; + bLower * ( sourceLayers[1] + ->getSymbolicUb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bLower * ( sourceLayers[1]->getSymbolicUpperBias() )[1]; + } + + if ( bUpper >= 0 ) + { + _symbolicUb[j * _size + i] += + bUpper * ( sourceLayers[1] + ->getSymbolicUb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bUpper * ( sourceLayers[1]->getSymbolicUpperBias() )[1]; + } + else + { _symbolicUb[j * _size + i] += - sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB]; + bUpper * ( sourceLayers[1] + ->getSymbolicLb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bUpper * ( sourceLayers[1]->getSymbolicLowerBias() )[1]; } } - _symbolicLowerBias[i] = -sourceLbs[0] * sourceLbs[1]; - _symbolicUpperBias[i] = -sourceLbs[0] * sourceUbs[1]; double lb = FloatUtils::infinity(); double ub = FloatUtils::negativeInfinity(); @@ -3327,7 +3329,6 @@ void Layer::computeSymbolicBoundsForWeightedSum() } } - /* We now have the symbolic representation for the current layer. 
Next, we compute new lower and upper bounds for @@ -3397,191 +3398,1104 @@ void Layer::computeSymbolicBoundsForWeightedSum() } } -double Layer::softmaxLSELowerBound( const Vector &inputs, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) +void Layer::computeParameterisedSymbolicBounds( const Vector &coeffs, bool receive ) { - double sum = 0; - for ( unsigned j = 0; j < inputs.size(); ++j ) + switch ( _type ) { - double lj = inputLbs[j]; - double uj = inputUbs[j]; - double xj = inputs[j]; - sum += - ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); - } - - return std::exp( inputs[i] ) / sum; -} + case RELU: + computeParameterisedSymbolicBoundsForRelu( coeffs, receive ); + break; -double Layer::softmaxdLSELowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) -{ - double val = 0; - if ( i == di ) - val += softmaxLSELowerBound( inputMids, inputLbs, inputUbs, i ); + case SIGN: + computeParameterisedSymbolicBoundsForSign( coeffs, receive ); + break; - double ldi = inputLbs[di]; - double udi = inputUbs[di]; + case LEAKY_RELU: + computeParameterisedSymbolicBoundsForLeakyRelu( coeffs, receive ); + break; - double sum = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - double lj = inputLbs[j]; - double uj = inputUbs[j]; - double xj = inputMids[j]; + case BILINEAR: + computeParameterisedSymbolicBoundsForBilinear( coeffs, receive ); + break; - sum += - ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); + default: + computeSymbolicBounds(); + break; } - - val -= std::exp( inputMids[i] ) / ( sum * sum ) * ( std::exp( udi ) - std::exp( ldi ) ) / - ( udi - ldi ); - - return val; } -double Layer::softmaxLSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) +void Layer::computeParameterisedSymbolicBoundsForRelu( const Vector &coeffs, bool receive ) { - double max = FloatUtils::negativeInfinity(); - unsigned maxInputIndex = 0; - unsigned index = 0; - for ( const auto &mid : inputMids ) + ASSERT( coeffs.size() == 1 ); + + double coeff = coeffs[0]; + ASSERT( coeff >= 0 && coeff <= 1 ); + + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + for ( unsigned i = 0; i < _size; ++i ) { - if ( mid > max ) + if ( _eliminatedNeurons.exists( i ) ) { - max = mid; - maxInputIndex = index; + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; + + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; } - ++index; } - if ( maxInputIndex == i ) - return softmaxERLowerBound( inputMids, inputLbs, inputUbs, i ); - else + for ( unsigned i = 0; i < _size; ++i ) { - double sum = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - if ( j == maxInputIndex ) - sum += 1; - else - { - double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; - double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; - double xjjstar = inputMids[j] - inputMids[maxInputIndex]; + if ( _eliminatedNeurons.exists( i ) ) + continue; - sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + - ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); - } - } + /* + There are two ways we can determine that a ReLU has become fixed: - return std::exp( inputMids[i] - 
inputMids[maxInputIndex] ) / sum; - } -} + 1. If the ReLU's variable has been externally fixed + 2. lbLb >= 0 (ACTIVE) or ubUb <= 0 (INACTIVE) + */ + PhaseStatus reluPhase = PHASE_NOT_FIXED; -double Layer::softmaxdLSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) -{ - double max = FloatUtils::negativeInfinity(); - unsigned maxInputIndex = 0; - unsigned index = 0; - for ( const auto &mid : inputMids ) - { - if ( mid > max ) - { - max = mid; - maxInputIndex = index; - } - ++index; - } + // Has the f variable been eliminated or fixed? + if ( FloatUtils::isPositive( _lb[i] ) ) + reluPhase = RELU_PHASE_ACTIVE; + else if ( FloatUtils::isZero( _ub[i] ) ) + reluPhase = RELU_PHASE_INACTIVE; - if ( maxInputIndex == i ) - return softmaxdERLowerBound( inputMids, inputLbs, inputUbs, i, di ); - else - { - double val = softmaxLSELowerBound2( inputMids, inputLbs, inputUbs, i ); + ASSERT( _neuronToActivationSources.exists( i ) ); + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); - double sum = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) + /* + A ReLU initially "inherits" the symbolic bounds computed + for its input variable + */ + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - if ( j == maxInputIndex ) - sum += 1; - else - { - double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; - double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; - double xjjstar = inputMids[j] - inputMids[maxInputIndex]; - sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + - ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); - } + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; + _symbolicUb[j * _size + i] = + sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; } - double val2 = std::exp( inputMids[i] - inputMids[maxInputIndex] ) / ( sum * sum ); + _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; + _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; - if ( i == di ) + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + + _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); + _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); + _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); + _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); + + // Has the b variable been fixed? 
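// Sketch of the non-fixed case handled below: with a single parameter t = coeffs[0] in [0, 1],
// the lower relaxation becomes
//   y >= t * x        (when the concrete lower estimate is non-positive; otherwise the
//                      standard scaled lower bound is kept),
// so t = 0 reproduces the flat bound y >= 0 and t = 1 reproduces y >= x, while the upper
// relaxation keeps the usual triangle/chord form
//   y <= u / (u - l) * (x - l)   with l = lbOfUb and u = ubOfUb.
// A hypothetical call site (names and constructor assumed, not taken from the patch):
//   Vector<double> coeffs( 1, 0.5 );
//   reluLayer->computeParameterisedSymbolicBounds( coeffs, /* receive */ true );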
+ if ( !FloatUtils::isNegative( sourceLb ) ) { - double ldijstar = inputLbs[i] - inputUbs[maxInputIndex]; - double udijstar = inputUbs[i] - inputLbs[maxInputIndex]; - return val - - val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / ( udijstar - ldijstar ); + reluPhase = RELU_PHASE_ACTIVE; } - else if ( maxInputIndex == di ) + else if ( !FloatUtils::isPositive( sourceUb ) ) { - double sum2 = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - if ( j == maxInputIndex ) - continue; - else - { - double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; - double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; - sum2 += ( std::exp( ujjstar ) - std::exp( ljjstar ) ) / ( ujjstar - ljjstar ); - } - } - return -val + val2 * sum2; + reluPhase = RELU_PHASE_INACTIVE; } - else + + if ( reluPhase == PHASE_NOT_FIXED ) { - double ldijstar = inputLbs[di] - inputUbs[maxInputIndex]; - double udijstar = inputUbs[di] - inputLbs[maxInputIndex]; - return -val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / - ( udijstar - ldijstar ); - } - } -} + // If we got here, we know that lbLb < 0 and ubUb + // > 0 There are four possible cases, depending on + // whether ubLb and lbUb are negative or positive + // (see Neurify paper, page 14). -double Layer::softmaxLSEUpperBound( const Vector &inputs, - const Vector &outputLb, - const Vector &outputUb, - unsigned i ) -{ - double li = outputLb[i]; - double ui = outputUb[i]; + // Upper bound + if ( _symbolicLbOfUb[i] <= 0 ) + { + // lbOfUb[i] < 0 < ubOfUb[i] + // Concretize the upper bound using the Ehler's-like approximation + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _symbolicUb[j * _size + i] = _symbolicUb[j * _size + i] * _symbolicUbOfUb[i] / + ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] ); - Vector inputTilda; - SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); + // Do the same for the bias, and then adjust + _symbolicUpperBias[i] = _symbolicUpperBias[i] * _symbolicUbOfUb[i] / + ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] ); + _symbolicUpperBias[i] -= _symbolicLbOfUb[i] * _symbolicUbOfUb[i] / + ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] ); + } + + // Lower bound: y >= coeff * x (varies continuously between y >= 0 and y >= alpha * x). + if ( _symbolicUbOfLb[i] <= 0 ) + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _symbolicLb[j * _size + i] *= coeff; + + _symbolicLowerBias[i] *= coeff; + } + else + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _symbolicLb[j * _size + i] = _symbolicLb[j * _size + i] * _symbolicUbOfLb[i] / + ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] ); + + _symbolicLowerBias[i] = _symbolicLowerBias[i] * _symbolicUbOfLb[i] / + ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] ); + } + + _symbolicLbOfLb[i] = 0; + } + else + { + // The phase of this ReLU is fixed! + if ( reluPhase == RELU_PHASE_ACTIVE ) + { + // Active ReLU, bounds are propagated as is + } + else + { + // Inactive ReLU, returns zero + _symbolicLbOfLb[i] = 0; + _symbolicUbOfLb[i] = 0; + _symbolicLbOfUb[i] = 0; + _symbolicUbOfUb[i] = 0; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] = 0; + _symbolicLb[j * _size + i] = 0; + } + + _symbolicLowerBias[i] = 0; + _symbolicUpperBias[i] = 0; + } + } + + if ( _symbolicLbOfUb[i] < 0 ) + _symbolicLbOfUb[i] = 0; + + /* + We now have the tightest bounds we can for the relu + variable. If they are tigheter than what was previously + known, store them. 
+ */ + if ( receive ) + { + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } + } +} + +void Layer::computeParameterisedSymbolicBoundsForSign( const Vector &coeffs, bool receive ) +{ + ASSERT( coeffs.size() == 2 ); + ASSERT( coeffs[0] >= 0 && coeffs[0] <= 1 ); + ASSERT( coeffs[1] >= 0 && coeffs[1] <= 1 ); + + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + for ( unsigned i = 0; i < _size; ++i ) + { + // Eliminate neurons are skipped + if ( _eliminatedNeurons.exists( i ) ) + { + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; + + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; + + continue; + } + + /* + There are two ways we can determine that a Sign has become fixed: + + 1. If the Sign's variable has been externally fixed + 2. lbLb >= 0 (Positive) or ubUb < 0 (Negative) + */ + PhaseStatus signPhase = PHASE_NOT_FIXED; + + // Has the f variable been eliminated or fixed? + if ( !FloatUtils::isNegative( _lb[i] ) ) + signPhase = SIGN_PHASE_POSITIVE; + else if ( FloatUtils::isNegative( _ub[i] ) ) + signPhase = SIGN_PHASE_NEGATIVE; + + ASSERT( _neuronToActivationSources.exists( i ) ); + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + + /* + A Sign initially "inherits" the symbolic bounds computed + for its input variable + */ + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; + _symbolicUb[j * _size + i] = + sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; + } + _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; + _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; + + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + + _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); + _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); + _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); + _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); + + // Has the b variable been fixed? 
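// Sketch of the parameterised relaxation used below when the phase is not fixed
// (l denotes the concrete lower estimate of the input, u its concrete upper estimate):
//   upper:  y <= -2/l * coeffs[0] * x + 1,  interpolating between y <= 1 (coeffs[0] = 0)
//           and the parallelogram side y <= -2x/l + 1 (coeffs[0] = 1);
//   lower:  y >=  2/u * coeffs[1] * x - 1,  interpolating between y >= -1 (coeffs[1] = 0)
//           and the parallelogram side y >= 2x/u - 1 (coeffs[1] = 1).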
+ if ( !FloatUtils::isNegative( sourceLb ) ) + { + signPhase = SIGN_PHASE_POSITIVE; + } + else if ( FloatUtils::isNegative( sourceUb ) ) + { + signPhase = SIGN_PHASE_NEGATIVE; + } + + if ( signPhase == PHASE_NOT_FIXED ) + { + PhaseStatus upperSignPhase = PHASE_NOT_FIXED; + PhaseStatus lowerSignPhase = PHASE_NOT_FIXED; + + // If we got here, we know that lbLb < 0 and ubUb + // > 0 + + // Upper bound + if ( !FloatUtils::isNegative( _symbolicLbOfUb[i] ) ) + { + // The upper bound is strictly positive - turns into + // the constant 1 + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _symbolicUb[j * _size + i] = 0; + + _symbolicUpperBias[i] = 1; + + upperSignPhase = SIGN_PHASE_POSITIVE; + } + else + { + // The upper bound's phase is not fixed, use parameterised + // parallelogram approximation: y <= - 2 / l * coeffs[0] * x + 1 + // (varies continuously between y <= 1 and y <= -2 / l * x + 1). + double factor = -2.0 / _symbolicLbOfLb[i] * coeffs[0]; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _symbolicUb[j * _size + i] *= factor; + + // Do the same for the bias, and then adjust + _symbolicUpperBias[i] *= factor; + _symbolicUpperBias[i] += 1; + } + + // Lower bound + if ( FloatUtils::isNegative( _symbolicUbOfLb[i] ) ) + { + // The lower bound is strictly negative - turns into + // the constant -1 + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _symbolicLb[j * _size + i] = 0; + + _symbolicLowerBias[i] = -1; + + lowerSignPhase = SIGN_PHASE_NEGATIVE; + } + else + { + // The lower bound's phase is not fixed, use parameterised + // parallelogram approximation: y >= 2 / u * coeffs[1] * x - 1 + // (varies continuously between y >= -1 and y >= 2 / u * x - 1). + double factor = 2.0 / _symbolicUbOfUb[i] * coeffs[1]; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] *= factor; + } + + // Do the same for the bias, and then adjust + _symbolicLowerBias[i] *= factor; + _symbolicLowerBias[i] -= 1; + } + + if ( upperSignPhase == PHASE_NOT_FIXED ) + { + _symbolicUbOfUb[i] = 1; + _symbolicLbOfUb[i] = -1; + } + else + { + _symbolicUbOfUb[i] = 1; + _symbolicLbOfUb[i] = 1; + } + + if ( lowerSignPhase == PHASE_NOT_FIXED ) + { + _symbolicUbOfLb[i] = 1; + _symbolicLbOfLb[i] = -1; + } + else + { + _symbolicUbOfLb[i] = -1; + _symbolicLbOfLb[i] = -1; + } + } + else + { + // The phase of this Sign is fixed! + double constant = ( signPhase == SIGN_PHASE_POSITIVE ) ? 1 : -1; + + _symbolicLbOfLb[i] = constant; + _symbolicUbOfLb[i] = constant; + _symbolicLbOfUb[i] = constant; + _symbolicUbOfUb[i] = constant; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] = 0; + _symbolicLb[j * _size + i] = 0; + } + + _symbolicLowerBias[i] = constant; + _symbolicUpperBias[i] = constant; + } + + if ( _symbolicLbOfLb[i] < -1 ) + _symbolicLbOfLb[i] = -1; + if ( _symbolicUbOfUb[i] > 1 ) + _symbolicUbOfUb[i] = 1; + + /* + We now have the tightest bounds we can for the sign + variable. If they are tigheter than what was previously + known, store them. 
+ */ + if ( receive ) + { + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } + } +} + +void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( const Vector &coeffs, + bool receive ) +{ + ASSERT( _alpha > 0 && _alpha < 1 ); + ASSERT( coeffs.size() == 1 ); + double coeff = coeffs[0]; + ASSERT( coeff >= 0 && coeff <= 1 ); + + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + { + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; + + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; + } + } + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + /* + There are two ways we can determine that a LeakyReLU has become fixed: + + 1. If the LeakyReLU's variable has been externally fixed + 2. lbLb >= 0 (ACTIVE) or ubUb <= 0 (INACTIVE) + */ + PhaseStatus leakyReluPhase = PHASE_NOT_FIXED; + + // Has the f variable been eliminated or fixed? + if ( FloatUtils::isPositive( _lb[i] ) ) + leakyReluPhase = RELU_PHASE_ACTIVE; + else if ( FloatUtils::isZero( _ub[i] ) ) + leakyReluPhase = RELU_PHASE_INACTIVE; + + ASSERT( _neuronToActivationSources.exists( i ) ); + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + + /* + A LeakyReLU initially "inherits" the symbolic bounds computed + for its input variable + */ + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; + _symbolicUb[j * _size + i] = + sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; + } + _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; + _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; + + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + + _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); + _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); + _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); + _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); + + // Has the b variable been fixed? 
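// Sketch of the parameterised relaxation used below for a non-fixed LeakyReLU with
// slope alpha in (0, 1):
//   chord side:  y <= (u - alpha*l)/(u - l) * x + (alpha - 1) * u * l / (u - l),
//                i.e. the line through (l, alpha*l) and (u, u);
//   slope side:  y >= lambda * x   with   lambda = (1 - alpha) * coeffs[0] + alpha,
//                so coeffs[0] = 0 gives lambda = alpha and coeffs[0] = 1 gives lambda = 1.
// (In the alpha > 1 branch kept in the code, the two sides swap roles.)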
+ if ( !FloatUtils::isNegative( sourceLb ) ) + { + leakyReluPhase = RELU_PHASE_ACTIVE; + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + leakyReluPhase = RELU_PHASE_INACTIVE; + } + + if ( leakyReluPhase == PHASE_NOT_FIXED ) + { + // LeakyReLU not fixed + // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) + // Concrete upper bound: x_f <= ub_b + double width = sourceUb - sourceLb; + double weight = ( sourceUb - _alpha * sourceLb ) / width; + + if ( _alpha <= 1 ) + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] *= weight; + } + + // Do the same for the bias, and then adjust + _symbolicUpperBias[i] *= weight; + _symbolicUpperBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; + + // For the lower bound, in general, x_f >= lambda * x_b, where + // 0 <= lambda <= 1, would be a sound lower bound. We + // use the heuristic described in section 4.1 of + // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + // to set the value of lambda (either 0 or 1 is considered). + + // lambda = ((1 - alpha) * coeff + alpha) (varies continuously between lambda = + // alpha and lambda = 1). Symbolic lower bound: x_f >= ((1 - alpha) * coeff + + // alpha) x_b Concrete lower bound: x_f >= 0 + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] *= ( 1 - _alpha ) * coeff + _alpha; + } + + _symbolicLowerBias[i] *= ( 1 - _alpha ) * coeff + _alpha; + } + + else + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] *= weight; + } + + // Do the same for the bias, and then adjust + _symbolicLowerBias[i] *= weight; + _symbolicLowerBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; + + // lambda = ((1 - alpha) * coeff + alpha) (varies continuously between lambda = + // alpha and lambda = 1). Symbolic lower bound: x_f >= ((1 - alpha) * coeff + + // alpha) x_b Concrete lower bound: x_f >= 0 + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] *= ( 1 - _alpha ) * coeff + _alpha; + } + + _symbolicUpperBias[i] *= ( 1 - _alpha ) * coeff + _alpha; + } + + /* + We now have the symbolic representation for the current + layer. Next, we compute new lower and upper bounds for + it. For each of these bounds, we compute an upper bound and + a lower bound. + */ + _symbolicLbOfLb[i] = _symbolicLowerBias[i]; + _symbolicUbOfLb[i] = _symbolicLowerBias[i]; + _symbolicLbOfUb[i] = _symbolicUpperBias[i]; + _symbolicUbOfUb[i] = _symbolicUpperBias[i]; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + double inputLb = _layerOwner->getLayer( 0 )->getLb( j ); + double inputUb = _layerOwner->getLayer( 0 )->getUb( j ); + + double entry = _symbolicLb[j * _size + i]; + + if ( entry >= 0 ) + { + _symbolicLbOfLb[i] += ( entry * inputLb ); + _symbolicUbOfLb[i] += ( entry * inputUb ); + } + else + { + _symbolicLbOfLb[i] += ( entry * inputUb ); + _symbolicUbOfLb[i] += ( entry * inputLb ); + } + + entry = _symbolicUb[j * _size + i]; + + if ( entry >= 0 ) + { + _symbolicLbOfUb[i] += ( entry * inputLb ); + _symbolicUbOfUb[i] += ( entry * inputUb ); + } + else + { + _symbolicLbOfUb[i] += ( entry * inputUb ); + _symbolicUbOfUb[i] += ( entry * inputLb ); + } + } + } + else + { + // The phase of this LeakyReLU is fixed! 
+ if ( leakyReluPhase == RELU_PHASE_ACTIVE ) + { + // Positive LeakyReLU, bounds are propagated as is + } + else + { + // Negative LeakyReLU, bounds are multiplied by _alpha + _symbolicLbOfLb[i] *= _alpha; + _symbolicUbOfLb[i] *= _alpha; + _symbolicLbOfUb[i] *= _alpha; + _symbolicUbOfUb[i] *= _alpha; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] *= _alpha; + _symbolicLb[j * _size + i] *= _alpha; + } + + _symbolicLowerBias[i] *= _alpha; + _symbolicUpperBias[i] *= _alpha; + } + } + + if ( _symbolicUbOfUb[i] > sourceUb ) + _symbolicUbOfUb[i] = sourceUb; + if ( _symbolicLbOfLb[i] < _alpha * sourceLb ) + _symbolicLbOfLb[i] = _alpha * sourceLb; + + /* + We now have the tightest bounds we can for the leakyRelu + variable. If they are tigheter than what was previously + known, store them. + */ + if ( receive ) + { + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } + } +} + +void Layer::computeParameterisedSymbolicBoundsForBilinear( const Vector &coeffs, + bool receive ) +{ + ASSERT( coeffs.size() == 2 ); + ASSERT( coeffs[0] >= 0 && coeffs[0] <= 1 ); + ASSERT( coeffs[1] >= 0 && coeffs[1] <= 1 ); + + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + { + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; + + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; + } + } + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + ASSERT( _neuronToActivationSources.exists( i ) ); + List sources = getActivationSources( i ); + ASSERT( sources.size() == 2 ); + + Vector sourceLbs; + Vector sourceUbs; + Vector sourceValues; + Vector sourceNeurons; + Vector sourceLayerSizes; + Vector sourceLayers; + bool allConstant = true; + for ( const auto &sourceIndex : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + unsigned sourceLayerSize = sourceLayer->getSize(); + + sourceLayers.append( sourceLayer ); + sourceNeurons.append( sourceNeuron ); + sourceLbs.append( sourceLb ); + sourceUbs.append( sourceUb ); + sourceLayerSizes.append( sourceLayerSize ); + + if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) + { + allConstant = false; + } + else + { + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + sourceValues.append( sourceValue ); + } + } + + if ( allConstant ) + { + // If the both source neurons have been eliminated, this neuron is constant + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] = 0; + _symbolicLb[j * _size + i] = 0; + } + + _symbolicUpperBias[i] = sourceValues[0] * sourceValues[1]; + _symbolicLowerBias[i] = sourceValues[0] * sourceValues[1]; + continue; + } + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] 
= 0; + _symbolicLb[j * _size + i] = 0; + } + + // Billinear linear relaxation (arXiv:2405.21063v2 [cs.LG]) + // Lower bound: out >= aLower * x + bLower * y + c_l, where + // aLower = alpha1 * l_y + ( 1 - alpha1 ) * u_y + // bLower = alpha1 * l_x + ( 1 - alpha1 ) * u_x + // cLower = -alpha1 * l_x * l_y - ( 1 - alpha1 ) * u_x * u_y + + // Upper bound: out <= aUpper * x + bUpper * y + c_u, where + // aUpper = alpha2 * u_y + ( 1 - alpha2 ) * l_y + // bUpper = alpha2 * l_x + ( 1 - alpha2 ) * u_x + // cUpper = -alpha2 * l_x * u_y - ( 1 - alpha2 ) * u_x * l_y + + double aLower = coeffs[0] * sourceLbs[1] + ( 1 - coeffs[0] ) * sourceUbs[1]; + double aUpper = coeffs[1] * sourceUbs[1] + ( 1 - coeffs[1] ) * sourceLbs[1]; + double bLower = coeffs[0] * sourceLbs[0] + ( 1 - coeffs[0] ) * sourceUbs[0]; + double bUpper = coeffs[1] * sourceLbs[0] + ( 1 - coeffs[1] ) * sourceUbs[0]; + + _symbolicLowerBias[i] = -coeffs[0] * sourceLbs[0] * sourceLbs[1] - + ( 1 - coeffs[0] ) * sourceUbs[0] * sourceUbs[1]; + _symbolicUpperBias[i] = -coeffs[1] * sourceLbs[0] * sourceUbs[1] - + ( 1 - coeffs[1] ) * sourceUbs[0] * sourceLbs[1]; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + if ( aLower >= 0 ) + { + _symbolicLb[j * _size + i] += + aLower * ( sourceLayers[0] + ->getSymbolicLb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicLowerBias[i] += aLower * ( sourceLayers[0]->getSymbolicLowerBias() )[0]; + } + else + { + _symbolicLb[j * _size + i] += + aLower * ( sourceLayers[0] + ->getSymbolicUb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicLowerBias[i] += aLower * ( sourceLayers[0]->getSymbolicUpperBias() )[0]; + } + + if ( aUpper >= 0 ) + { + _symbolicUb[j * _size + i] += + aUpper * ( sourceLayers[0] + ->getSymbolicUb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicUpperBias[i] += aUpper * ( sourceLayers[0]->getSymbolicUpperBias() )[0]; + } + else + { + _symbolicUb[j * _size + i] += + aUpper * ( sourceLayers[0] + ->getSymbolicLb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicUpperBias[i] += aUpper * ( sourceLayers[0]->getSymbolicLowerBias() )[0]; + } + + if ( bLower >= 0 ) + { + _symbolicLb[j * _size + i] += + bLower * ( sourceLayers[1] + ->getSymbolicLb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bLower * ( sourceLayers[1]->getSymbolicLowerBias() )[1]; + } + else + { + _symbolicLb[j * _size + i] += + bLower * ( sourceLayers[1] + ->getSymbolicUb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bLower * ( sourceLayers[1]->getSymbolicUpperBias() )[1]; + } + + if ( bUpper >= 0 ) + { + _symbolicUb[j * _size + i] += + bUpper * ( sourceLayers[1] + ->getSymbolicUb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bUpper * ( sourceLayers[1]->getSymbolicUpperBias() )[1]; + } + else + { + _symbolicUb[j * _size + i] += + bUpper * ( sourceLayers[1] + ->getSymbolicLb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bUpper * ( sourceLayers[1]->getSymbolicLowerBias() )[1]; + } + } + + double lb = FloatUtils::infinity(); + double ub = FloatUtils::negativeInfinity(); + List values = { sourceLbs[0] * sourceLbs[1], + sourceLbs[0] * sourceUbs[1], + sourceUbs[0] * sourceLbs[1], + sourceUbs[0] * sourceUbs[1] }; + for ( const auto &v : values ) + { + if ( v < lb ) + lb = v; + if ( v > ub ) + ub = v; + } + + /* + We now have the symbolic representation for the current + layer. Next, we compute new lower and upper bounds for + it. 
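// Worked example (sketch) for the planes defined above: with coeffs = {1, 1} they reduce
// to the classical McCormick facets
//   lower:  out >= l_y * x + l_x * y - l_x * l_y,
//   upper:  out <= u_y * x + l_x * y - l_x * u_y,
// and with coeffs = {0, 0} to the opposite pair
//   lower:  out >= u_y * x + u_x * y - u_x * u_y,
//   upper:  out <= l_y * x + u_x * y - u_x * l_y;
// intermediate values of coeffs interpolate linearly between the two facets of each envelope.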
For each of these bounds, we compute an upper bound and + a lower bound. + */ + _symbolicLbOfLb[i] = _symbolicLowerBias[i]; + _symbolicUbOfLb[i] = _symbolicLowerBias[i]; + _symbolicLbOfUb[i] = _symbolicUpperBias[i]; + _symbolicUbOfUb[i] = _symbolicUpperBias[i]; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + double inputLb = _layerOwner->getLayer( 0 )->getLb( j ); + double inputUb = _layerOwner->getLayer( 0 )->getUb( j ); + + double entry = _symbolicLb[j * _size + i]; + + if ( entry >= 0 ) + { + _symbolicLbOfLb[i] += ( entry * inputLb ); + _symbolicUbOfLb[i] += ( entry * inputUb ); + } + else + { + _symbolicLbOfLb[i] += ( entry * inputUb ); + _symbolicUbOfLb[i] += ( entry * inputLb ); + } + + entry = _symbolicUb[j * _size + i]; + + if ( entry >= 0 ) + { + _symbolicLbOfUb[i] += ( entry * inputLb ); + _symbolicUbOfUb[i] += ( entry * inputUb ); + } + else + { + _symbolicLbOfUb[i] += ( entry * inputUb ); + _symbolicUbOfUb[i] += ( entry * inputLb ); + } + } + + /* + We now have the tightest bounds we can for the relu + variable. If they are tigheter than what was previously + known, store them. + */ + if ( receive ) + { + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } + } +} + +double Layer::LSELowerBound( const Vector &inputs, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) +{ + double sum = 0; + for ( unsigned j = 0; j < inputs.size(); ++j ) + { + double lj = inputLbs[j]; + double uj = inputUbs[j]; + double xj = inputs[j]; + sum += + ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); + } + + return std::exp( inputs[i] ) / sum; +} + +double Layer::dLSELowerBound( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) +{ + double val = 0; + if ( i == di ) + val += LSELowerBound( inputMids, inputLbs, inputUbs, i ); + + double ldi = inputLbs[di]; + double udi = inputUbs[di]; + + double sum = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + double lj = inputLbs[j]; + double uj = inputUbs[j]; + double xj = inputMids[j]; + + sum += + ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); + } + + val -= std::exp( inputMids[i] ) / ( sum * sum ) * ( std::exp( udi ) - std::exp( ldi ) ) / + ( udi - ldi ); + + return val; +} + +double Layer::LSELowerBound2( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) +{ + double max = FloatUtils::negativeInfinity(); + unsigned maxInputIndex = 0; + unsigned index = 0; + for ( const auto &mid : inputMids ) + { + if ( mid > max ) + { + max = mid; + maxInputIndex = index; + } + ++index; + } + + if ( maxInputIndex == i ) + return ERLowerBound( inputMids, inputLbs, inputUbs, i ); + else + { + double sum = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + if ( j == maxInputIndex ) + sum += 1; + else + { + double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; + double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; + double xjjstar = inputMids[j] - inputMids[maxInputIndex]; + + sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + + ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); + } + } + + return std::exp( 
inputMids[i] - inputMids[maxInputIndex] ) / sum; + } +} + +double Layer::dLSELowerBound2( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) +{ + double max = FloatUtils::negativeInfinity(); + unsigned maxInputIndex = 0; + unsigned index = 0; + for ( const auto &mid : inputMids ) + { + if ( mid > max ) + { + max = mid; + maxInputIndex = index; + } + ++index; + } + + if ( maxInputIndex == i ) + return dERLowerBound( inputMids, inputLbs, inputUbs, i, di ); + else + { + double val = LSELowerBound2( inputMids, inputLbs, inputUbs, i ); + + double sum = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + if ( j == maxInputIndex ) + sum += 1; + else + { + double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; + double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; + double xjjstar = inputMids[j] - inputMids[maxInputIndex]; + sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + + ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); + } + } + double val2 = std::exp( inputMids[i] - inputMids[maxInputIndex] ) / ( sum * sum ); + + if ( i == di ) + { + double ldijstar = inputLbs[i] - inputUbs[maxInputIndex]; + double udijstar = inputUbs[i] - inputLbs[maxInputIndex]; + return val - + val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / ( udijstar - ldijstar ); + } + else if ( maxInputIndex == di ) + { + double sum2 = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + if ( j == maxInputIndex ) + continue; + else + { + double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; + double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; + sum2 += ( std::exp( ujjstar ) - std::exp( ljjstar ) ) / ( ujjstar - ljjstar ); + } + } + return -val + val2 * sum2; + } + else + { + double ldijstar = inputLbs[di] - inputUbs[maxInputIndex]; + double udijstar = inputUbs[di] - inputLbs[maxInputIndex]; + return -val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / + ( udijstar - ldijstar ); + } + } +} + +double Layer::LSEUpperBound( const Vector &inputs, + const Vector &outputLb, + const Vector &outputUb, + unsigned i ) +{ + double li = outputLb[i]; + double ui = outputUb[i]; + + Vector inputTilda; + SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); return ( ( li * std::log( ui ) - ui * std::log( li ) ) / ( std::log( ui ) - std::log( li ) ) - ( ui - li ) / ( std::log( ui ) - std::log( li ) ) * SoftmaxConstraint::logSumOfExponential( inputTilda ) ); } -double Layer::softmaxdLSEUpperbound( const Vector &inputMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned i, - unsigned di ) +double Layer::dLSEUpperbound( const Vector &inputMids, + const Vector &outputLb, + const Vector &outputUb, + unsigned i, + unsigned di ) { double li = outputLb[i]; double ui = outputUb[i]; @@ -3595,10 +4509,10 @@ double Layer::softmaxdLSEUpperbound( const Vector &inputMids, return val * val2; } -double Layer::softmaxERLowerBound( const Vector &inputs, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) +double Layer::ERLowerBound( const Vector &inputs, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) { Vector inputTilda; SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); @@ -3622,13 +4536,13 @@ double Layer::softmaxERLowerBound( const Vector &inputs, return 1 / sum; } -double Layer::softmaxdERLowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) +double Layer::dERLowerBound( const Vector &inputMids, + 
const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) { - double val = softmaxERLowerBound( inputMids, inputLbs, inputUbs, i ); + double val = ERLowerBound( inputMids, inputLbs, inputUbs, i ); if ( i != di ) { @@ -3653,10 +4567,10 @@ double Layer::softmaxdERLowerBound( const Vector &inputMids, } } -double Layer::softmaxERUpperBound( const Vector &inputs, - const Vector &outputLb, - const Vector &outputUb, - unsigned i ) +double Layer::ERUpperBound( const Vector &inputs, + const Vector &outputLb, + const Vector &outputUb, + unsigned i ) { double li = outputLb[i]; double ui = outputUb[i]; @@ -3667,16 +4581,15 @@ double Layer::softmaxERUpperBound( const Vector &inputs, return ui + li - ui * li * SoftmaxConstraint::sumOfExponential( inputTilda ); } -double Layer::softmaxdERUpperBound( const Vector &inputMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned i, - unsigned di ) +double Layer::dERUpperBound( const Vector &inputMids, + const Vector &outputLb, + const Vector &outputUb, + unsigned i, + unsigned di ) { double li = outputLb[i]; double ui = outputUb[i]; - if ( i == di ) { double val2 = -1; @@ -3688,9 +4601,9 @@ double Layer::softmaxdERUpperBound( const Vector &inputMids, return -li * ui * std::exp( inputMids[di] - inputMids[i] ); } -double Layer::softmaxLinearLowerBound( const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) +double Layer::linearLowerBound( const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) { Vector uTilda; SoftmaxConstraint::xTilda( inputUbs, inputLbs[i], uTilda ); @@ -3698,9 +4611,9 @@ double Layer::softmaxLinearLowerBound( const Vector &inputLbs, return 1 / SoftmaxConstraint::sumOfExponential( uTilda ); } -double Layer::softmaxLinearUpperBound( const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) +double Layer::linearUpperBound( const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) { Vector lTilda; SoftmaxConstraint::xTilda( inputLbs, inputUbs[i], lTilda ); @@ -3723,6 +4636,256 @@ void Layer::eliminateVariable( unsigned variable, double value ) _variableToNeuron.erase( variable ); } +const Vector Layer::getNonfixedNeurons() const +{ + Vector nonfixedNeurons = Vector( {} ); + for ( unsigned i = 0; i < _size; ++i ) + { + if ( neuronNonfixed( i ) ) + { + nonfixedNeurons.append( NeuronIndex( _layerIndex, i ) ); + } + } + const Vector neuronList = Vector( nonfixedNeurons ); + return neuronList; +} + +bool Layer::neuronNonfixed( unsigned neuron ) const +{ + if ( _eliminatedNeurons.exists( neuron ) ) + { + return false; + } + + switch ( _type ) + { + case INPUT: + case WEIGHTED_SUM: + { + return false; + break; + } + case RELU: + case LEAKY_RELU: + case SIGN: + case ABSOLUTE_VALUE: + { + return neuronNonfixedAtZero( neuron ); + break; + } + case SIGMOID: + { + return neuronNonfixedSigmoid( neuron ); + break; + } + case Layer::ROUND: + { + return neuronNonfixedRound( neuron ); + break; + } + case MAX: + { + return neuronNonfixedMax( neuron ); + break; + } + case SOFTMAX: + { + return neuronNonfixedSoftmax( neuron ); + break; + } + case BILINEAR: + { + return neuronNonfixedBilinear( neuron ); + break; + } + default: + { + printf( "Error! 
Neuron type %u unsupported\n", _type ); + throw MarabouError( MarabouError::NETWORK_LEVEL_REASONER_ACTIVATION_NOT_SUPPORTED ); + break; + } + } +} + +bool Layer::neuronNonfixedAtZero( unsigned neuron ) const +{ + // A Relu/Sign/Abs/Leaky Relu activation is non-fixed if it's not exernally fixed, + // its source neuron isn't externally fixed and sourceLb < 0 < sourceUb. + NeuronIndex sourceIndex = *_neuronToActivationSources[neuron].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + if ( sourceLayer->neuronEliminated( sourceIndex._neuron ) ) + { + return false; + } + + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + return FloatUtils::isNegative( sourceLb ) && FloatUtils::isPositive( sourceUb ); +} + +bool Layer::neuronNonfixedSigmoid( unsigned neuron ) const +{ + // A Sigmoid activation is non-fixed if it's not exernally fixed, + // its source neuron isn't externally fixed and sourceUb != sourceLb. + NeuronIndex sourceIndex = *_neuronToActivationSources[neuron].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + if ( sourceLayer->neuronEliminated( sourceIndex._neuron ) ) + { + return false; + } + + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + return !FloatUtils::areEqual( sourceLb, sourceUb ); +} + +bool Layer::neuronNonfixedRound( unsigned neuron ) const +{ + // A Round activation is non-fixed if it's not exernally fixed, + // its source neuron isn't externally fixed and round( sourceUb ) != round( sourceLb ). + NeuronIndex sourceIndex = *_neuronToActivationSources[neuron].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + if ( sourceLayer->neuronEliminated( sourceIndex._neuron ) ) + { + return false; + } + + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + return !FloatUtils::areEqual( FloatUtils::round( sourceUb ), FloatUtils::round( sourceLb ) ); +} + +bool Layer::neuronNonfixedMax( unsigned neuron ) const +{ + // A Max activation is non-fixed if not all its sources have a fixed value and no source + // has a lower bound larger than the upper-bounds of the other source variables. 
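// Sketch of the check below: let k* be a source with the largest lower bound l_{k*}.
// The Max phase is fixed exactly when u_j <= l_{k*} for every other source j; if some
// other source's upper bound still exceeds l_{k*}, two sources may attain the maximum
// and the neuron is reported as non-fixed. Eliminated sources are skipped, and if every
// source has been eliminated the neuron is a constant and counts as fixed.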
+    List sources = getActivationSources( neuron );
+    NeuronIndex indexOfMaxLowerBound = *( sources.begin() );
+    double maxLowerBound = FloatUtils::negativeInfinity();
+    double maxUpperBound = FloatUtils::negativeInfinity();
+    Map sourceUbs;
+    bool allConstant = true;
+    for ( const auto &sourceIndex : sources )
+    {
+        const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
+        unsigned sourceNeuron = sourceIndex._neuron;
+        if ( !sourceLayer->neuronEliminated( sourceIndex._neuron ) )
+        {
+            allConstant = false;
+            double sourceLb = sourceLayer->getLb( sourceNeuron );
+            double sourceUb = sourceLayer->getUb( sourceNeuron );
+            sourceUbs[sourceIndex] = sourceUb;
+            if ( maxLowerBound < sourceLb )
+            {
+                indexOfMaxLowerBound = sourceIndex;
+                maxLowerBound = sourceLb;
+            }
+            if ( maxUpperBound < sourceUb )
+            {
+                maxUpperBound = sourceUb;
+            }
+        }
+    }
+
+    if ( allConstant )
+    {
+        return false;
+    }
+
+    bool phaseFixed = true;
+    for ( const auto &sourceIndex : sources )
+    {
+        if ( sourceIndex != indexOfMaxLowerBound &&
+             FloatUtils::gt( sourceUbs[sourceIndex], maxLowerBound ) )
+        {
+            phaseFixed = false;
+            break;
+        }
+    }
+    return !phaseFixed;
+}
+
+bool Layer::neuronNonfixedSoftmax( unsigned neuron ) const
+{
+    // A Softmax activation is non-fixed if not all its sources have a fixed value
+    // and its own output, as bounded by the linear approximation, satisfies lb != ub.
+    List sources = getActivationSources( neuron );
+    Vector sourceLbs;
+    Vector sourceUbs;
+    bool allConstant = true;
+    for ( const auto &sourceIndex : sources )
+    {
+        const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
+        unsigned sourceNeuron = sourceIndex._neuron;
+        if ( !sourceLayer->neuronEliminated( sourceIndex._neuron ) )
+        {
+            allConstant = false;
+            double sourceLb = sourceLayer->getLb( sourceNeuron );
+            double sourceUb = sourceLayer->getUb( sourceNeuron );
+            sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS );
+            sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS );
+        }
+    }
+
+    if ( allConstant )
+    {
+        return false;
+    }
+
+    unsigned selfIndex = 0;
+    Set handledInputNeurons;
+    for ( unsigned i = 0; i < neuron; ++i )
+    {
+        for ( const auto &sourceIndex : getActivationSources( i ) )
+        {
+            if ( !handledInputNeurons.exists( sourceIndex._neuron ) )
+            {
+                handledInputNeurons.insert( sourceIndex._neuron );
+                break;
+            }
+        }
+    }
+    for ( const auto &sourceIndex : sources )
+    {
+        if ( handledInputNeurons.exists( sourceIndex._neuron ) )
+        {
+            ++selfIndex;
+        }
+        else
+        {
+            break;
+        }
+    }
+
+    double lb = std::max( Layer::linearLowerBound( sourceLbs, sourceUbs, selfIndex ), _lb[neuron] );
+    double ub = std::min( Layer::linearUpperBound( sourceLbs, sourceUbs, selfIndex ), _ub[neuron] );
+
+    return !FloatUtils::areEqual( lb, ub );
+}
+
+bool Layer::neuronNonfixedBilinear( unsigned neuron ) const
+{
+    // A Bilinear activation is non-fixed if none of its source neurons has a fixed value.
+ List sources = getActivationSources( neuron ); + for ( const auto &sourceIndex : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + unsigned sourceNeuron = sourceIndex._neuron; + if ( sourceLayer->neuronEliminated( sourceIndex._neuron ) ) + { + return false; + } + + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + if ( FloatUtils::areEqual( sourceLb, sourceUb ) ) + { + return false; + } + } + return true; +} + void Layer::updateVariableIndices( const Map &oldIndexToNewIndex, const Map &mergedVariables ) { @@ -4006,7 +5169,6 @@ String Layer::typeToString( Type type ) return "BILINEAR"; break; - default: return "UNKNOWN TYPE"; break; diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h index 900276eda3..f2ec9f5d42 100644 --- a/src/nlr/Layer.h +++ b/src/nlr/Layer.h @@ -2,7 +2,7 @@ /*! \file Layer.h ** \verbatim ** Top contributors (to current version): - ** Guy Katz, Ido Shmuel + ** Guy Katz ** This file is part of the Marabou project. ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS ** in the top-level source directory) and their institutional affiliations. @@ -136,8 +136,75 @@ class Layer void obtainCurrentBounds( const Query &inputQuery ); void obtainCurrentBounds(); - void computeSymbolicBounds(); void computeIntervalArithmeticBounds(); + void computeSymbolicBounds(); + void computeParameterisedSymbolicBounds( const Vector &coeffs, bool receive = false ); + + // Get all non-fixed neurons in a single layer. + const Vector getNonfixedNeurons() const; + + // Determine whether given non-linear activaton neuron has a non-fixed phase. + bool neuronNonfixed( unsigned neuron ) const; + + const double *getSymbolicLb() const; + const double *getSymbolicUb() const; + const double *getSymbolicLowerBias() const; + const double *getSymbolicUpperBias() const; + + // The following methods compute concrete softmax output bounds + // using different linear approximation, as well as the coefficients + // of softmax inputs in the symbolic bounds + static double LSELowerBound( const Vector &sourceMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned outputIndex ); + static double dLSELowerBound( const Vector &sourceMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned outputIndex, + unsigned inputIndex ); + static double LSELowerBound2( const Vector &sourceMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned outputIndex ); + static double dLSELowerBound2( const Vector &sourceMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned outputIndex, + unsigned inputIndex ); + static double LSEUpperBound( const Vector &sourceMids, + const Vector &outputLb, + const Vector &outputUb, + unsigned outputIndex ); + static double dLSEUpperbound( const Vector &sourceMids, + const Vector &outputLb, + const Vector &outputUb, + unsigned outputIndex, + unsigned inputIndex ); + static double ERLowerBound( const Vector &sourceMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned outputIndex ); + static double dERLowerBound( const Vector &sourceMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned outputIndex, + unsigned inputIndex ); + static double ERUpperBound( const Vector &sourceMids, + const Vector &outputLbs, + const Vector &outputUbs, + unsigned outputIndex ); + static double dERUpperBound( const Vector &sourceMids, + const Vector &outputLbs, + const Vector &outputUbs, + unsigned outputIndex, + unsigned inputIndex ); + 
static double linearLowerBound( const Vector &outputLbs, + const Vector &outputUbs, + unsigned outputIndex ); + static double linearUpperBound( const Vector &outputLbs, + const Vector &outputUbs, + unsigned outputIndex ); /* Preprocessing functionality: variable elimination and reindexing @@ -208,74 +275,6 @@ class Layer void allocateMemory(); void freeMemoryIfNeeded(); - /* - The following methods compute concrete softmax output bounds - using different linear approximation, as well as the coefficients - of softmax inputs in the symbolic bounds - */ - double softmaxLSELowerBound( const Vector &inputs, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ); - - double softmaxdLSELowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ); - - double softmaxLSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ); - - double softmaxdLSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ); - - double softmaxLSEUpperBound( const Vector &inputs, - const Vector &outputLb, - const Vector &outputUb, - unsigned i ); - - double softmaxdLSEUpperbound( const Vector &inputMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned i, - unsigned di ); - - double softmaxERLowerBound( const Vector &inputs, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ); - - double softmaxdERLowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ); - - double softmaxERUpperBound( const Vector &inputs, - const Vector &outputLb, - const Vector &outputUb, - unsigned i ); - - double softmaxdERUpperBound( const Vector &inputMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned i, - unsigned di ); - - double softmaxLinearLowerBound( const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ); - - double softmaxLinearUpperBound( const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ); - /* Helper functions for symbolic bound tightening */ @@ -292,6 +291,16 @@ class Layer void computeSymbolicBoundsForBilinear(); void computeSymbolicBoundsDefault(); + /* + Helper functions for parameterised symbolic bound tightening + */ + void computeParameterisedSymbolicBoundsForRelu( const Vector &coeffs, bool receive ); + void computeParameterisedSymbolicBoundsForSign( const Vector &coeffs, bool receive ); + void computeParameterisedSymbolicBoundsForLeakyRelu( const Vector &coeffs, + bool receive ); + void computeParameterisedSymbolicBoundsForBilinear( const Vector &coeffs, + bool receive ); + /* Helper functions for interval bound tightening */ @@ -306,10 +315,16 @@ class Layer void computeIntervalArithmeticBoundsForSoftmax(); void computeIntervalArithmeticBoundsForBilinear(); - const double *getSymbolicLb() const; - const double *getSymbolicUb() const; - const double *getSymbolicLowerBias() const; - const double *getSymbolicUpperBias() const; + /* + Helper functions for determining whether given non-linear activaton has a non-fixed phase. 
+ */ + bool neuronNonfixedAtZero( unsigned neuron ) const; + bool neuronNonfixedSigmoid( unsigned neuron ) const; + bool neuronNonfixedRound( unsigned neuron ) const; + bool neuronNonfixedMax( unsigned neuron ) const; + bool neuronNonfixedSoftmax( unsigned neuron ) const; + bool neuronNonfixedBilinear( unsigned neuron ) const; + double getSymbolicLbOfLb( unsigned neuron ) const; double getSymbolicUbOfLb( unsigned neuron ) const; double getSymbolicLbOfUb( unsigned neuron ) const; diff --git a/src/nlr/LayerOwner.h b/src/nlr/LayerOwner.h index 180a4fdebd..99d08743b3 100644 --- a/src/nlr/LayerOwner.h +++ b/src/nlr/LayerOwner.h @@ -17,6 +17,7 @@ #define __LayerOwner_h__ #include "ITableau.h" +#include "PolygonalTightening.h" #include "Tightening.h" namespace NLR { @@ -35,6 +36,7 @@ class LayerOwner virtual const ITableau *getTableau() const = 0; virtual unsigned getNumberOfLayers() const = 0; virtual void receiveTighterBound( Tightening tightening ) = 0; + virtual void receivePolygonalTightening( PolygonalTightening &polygonalTightening ) = 0; }; } // namespace NLR diff --git a/src/nlr/NLRError.h b/src/nlr/NLRError.h index eb19fa05bd..18aca963ce 100644 --- a/src/nlr/NLRError.h +++ b/src/nlr/NLRError.h @@ -27,7 +27,8 @@ class NLRError : public Error INPUT_LAYER_NOT_THE_FIRST_LAYER = 2, LEAKY_RELU_SLOPES_NOT_UNIFORM = 3, RELU_NOT_FOUND = 4, - LAYER_NOT_FOUND = 5 + LAYER_NOT_FOUND = 5, + NEURON_NOT_FOUND = 6, }; NLRError( NLRError::Code code ) diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index 8390a0190b..17a6417162 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -2,7 +2,7 @@ /*! \file NetworkLevelReasoner.cpp ** \verbatim ** Top contributors (to current version): - ** Guy Katz + ** Guy Katz, Ido Shmuel ** This file is part of the Marabou project. ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS ** in the top-level source directory) and their institutional affiliations. 
@@ -20,7 +20,6 @@ #include "FloatUtils.h" #include "InfeasibleQueryException.h" #include "IterativePropagator.h" -#include "LPFormulator.h" #include "MILPFormulator.h" #include "MStringf.h" #include "MarabouError.h" @@ -75,6 +74,11 @@ void NetworkLevelReasoner::addLayerDependency( unsigned sourceLayer, unsigned ta _layerIndexToLayer[sourceLayer]->getSize() ); } +void NetworkLevelReasoner::removeLayerDependency( unsigned sourceLayer, unsigned targetLayer ) +{ + _layerIndexToLayer[targetLayer]->removeSourceLayer( sourceLayer ); +} + void NetworkLevelReasoner::computeSuccessorLayers() { for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) @@ -198,12 +202,57 @@ void NetworkLevelReasoner::clearConstraintTightenings() _boundTightenings.clear(); } +void NetworkLevelReasoner::receivePolygonalTightening( PolygonalTightening &polygonalTightening ) +{ + _polygonalBoundTightenings.append( polygonalTightening ); +} + +void NetworkLevelReasoner::getPolygonalTightenings( + List &polygonalTightenings ) +{ + polygonalTightenings = _polygonalBoundTightenings; + _polygonalBoundTightenings.clear(); +} + +void NetworkLevelReasoner::clearPolygonalTightenings() +{ + _polygonalBoundTightenings.clear(); +} + +void NetworkLevelReasoner::receiveInfeasibleBranches( + Map &neuronToBranchIndex ) +{ + _infeasibleBranches.append( neuronToBranchIndex ); +} + +void NetworkLevelReasoner::getInfeasibleBranches( + List> &infeasibleBranches ) +{ + infeasibleBranches = _infeasibleBranches; + _infeasibleBranches.clear(); +} + +void NetworkLevelReasoner::clearInfeasibleBranches() +{ + _infeasibleBranches.clear(); +} + void NetworkLevelReasoner::symbolicBoundPropagation() { for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) _layerIndexToLayer[i]->computeSymbolicBounds(); } +void NetworkLevelReasoner::parameterisedSymbolicBoundPropagation( const Vector &coeffs ) +{ + Map> layerIndicesToParameters = getParametersForLayers( coeffs ); + for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) + { + const Vector ¤tLayerCoeffs = layerIndicesToParameters[i]; + _layerIndexToLayer[i]->computeParameterisedSymbolicBounds( currentLayerCoeffs, true ); + } +} + void NetworkLevelReasoner::deepPolyPropagation() { if ( _deepPolyAnalysis == nullptr ) @@ -211,6 +260,127 @@ void NetworkLevelReasoner::deepPolyPropagation() _deepPolyAnalysis->run(); } +void NetworkLevelReasoner::parameterisedDeepPoly( bool storeSymbolicBounds, + const Vector &coeffs ) +{ + if ( !storeSymbolicBounds ) + { + bool useParameterisedSBT = coeffs.size() > 0; + Map> layerIndicesToParameters = + Map>( {} ); + if ( useParameterisedSBT ) + { + layerIndicesToParameters = getParametersForLayers( coeffs ); + } + + _deepPolyAnalysis = + std::unique_ptr( new DeepPolyAnalysis( this, + storeSymbolicBounds, + storeSymbolicBounds, + useParameterisedSBT, + &layerIndicesToParameters ) ); + + // Clear deepPolyAnalysis pointer after running. + _deepPolyAnalysis->run(); + _deepPolyAnalysis = nullptr; + } + else + { + // Clear the previous symbolic bound maps. + _outputSymbolicLb.clear(); + _outputSymbolicUb.clear(); + _outputSymbolicLowerBias.clear(); + _outputSymbolicUpperBias.clear(); + + _predecessorSymbolicLb.clear(); + _predecessorSymbolicUb.clear(); + _predecessorSymbolicLowerBias.clear(); + _predecessorSymbolicUpperBias.clear(); + + // Temporarily add weighted sum layer to the NLR of the same size of the output layer. 
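+        // The temporary layer copies the output layer through identity weights, so the
+        // DeepPoly run below also records symbolic bounds for the original output layer
+        // itself; the layer is deleted again once the run completes.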
+ Layer *outputLayer = _layerIndexToLayer[getNumberOfLayers() - 1]; + unsigned outputLayerIndex = outputLayer->getLayerIndex(); + unsigned outputLayerSize = outputLayer->getSize(); + unsigned newLayerIndex = outputLayerIndex + 1; + + addLayer( newLayerIndex, Layer::WEIGHTED_SUM, outputLayerSize ); + addLayerDependency( outputLayerIndex, newLayerIndex ); + Layer *newLayer = _layerIndexToLayer[newLayerIndex]; + + for ( unsigned i = 0; i < outputLayerSize; ++i ) + { + setWeight( outputLayerIndex, i, newLayerIndex, i, 1 ); + newLayer->setLb( i, FloatUtils::infinity() ); + newLayer->setUb( i, FloatUtils::negativeInfinity() ); + } + + // Initialize maps with zero vectors. + for ( const auto &pair : _layerIndexToLayer ) + { + unsigned layerIndex = pair.first; + Layer *layer = pair.second; + unsigned size = layer->getSize(); + Layer::Type layerType = layer->getLayerType(); + + _outputSymbolicLb[layerIndex] = Vector( outputLayerSize * size, 0 ); + _outputSymbolicUb[layerIndex] = Vector( outputLayerSize * size, 0 ); + _outputSymbolicLowerBias[layerIndex] = Vector( outputLayerSize, 0 ); + _outputSymbolicUpperBias[layerIndex] = Vector( outputLayerSize, 0 ); + + if ( layerType != Layer::WEIGHTED_SUM && layerType != Layer::INPUT ) + { + unsigned maxSourceSize = 0; + for ( unsigned i = 0; i < size; ++i ) + { + unsigned sourceSize = layer->getActivationSources( i ).size(); + maxSourceSize = sourceSize > maxSourceSize ? sourceSize : maxSourceSize; + } + _predecessorSymbolicLb[layerIndex] = Vector( size * maxSourceSize, 0 ); + _predecessorSymbolicUb[layerIndex] = Vector( size * maxSourceSize, 0 ); + _predecessorSymbolicLowerBias[layerIndex] = Vector( size, 0 ); + _predecessorSymbolicUpperBias[layerIndex] = Vector( size, 0 ); + } + } + + // Populate symbolic bounds maps via DeepPoly. + bool useParameterisedSBT = coeffs.size() > 0; + Map> layerIndicesToParameters = + Map>( {} ); + if ( useParameterisedSBT ) + { + layerIndicesToParameters = getParametersForLayers( coeffs ); + } + + _deepPolyAnalysis = std::unique_ptr( + new DeepPolyAnalysis( this, + storeSymbolicBounds, + storeSymbolicBounds, + useParameterisedSBT, + &layerIndicesToParameters, + &_outputSymbolicLb, + &_outputSymbolicUb, + &_outputSymbolicLowerBias, + &_outputSymbolicUpperBias, + &_predecessorSymbolicLb, + &_predecessorSymbolicUb, + &_predecessorSymbolicLowerBias, + &_predecessorSymbolicUpperBias ) ); + + // Clear deepPolyAnalysis pointer after running. + _deepPolyAnalysis->run(); + _deepPolyAnalysis = nullptr; + + // Remove new weighted sum layer. 
+ removeLayerDependency( outputLayerIndex, newLayerIndex ); + _layerIndexToLayer.erase( newLayerIndex ); + if ( newLayer ) + { + delete newLayer; + newLayer = NULL; + } + } +} + void NetworkLevelReasoner::lpRelaxationPropagation() { LPFormulator lpFormulator( this ); @@ -221,6 +391,30 @@ void NetworkLevelReasoner::lpRelaxationPropagation() Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE ) lpFormulator.optimizeBoundsWithLpRelaxation( _layerIndexToLayer, true ); + else if ( Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX ) + { + const Vector optimalCoeffs = OptimalParameterisedSymbolicBoundTightening(); + Map> layerIndicesToParameters = + getParametersForLayers( optimalCoeffs ); + lpFormulator.optimizeBoundsWithLpRelaxation( + _layerIndexToLayer, false, layerIndicesToParameters ); + lpFormulator.optimizeBoundsWithLpRelaxation( + _layerIndexToLayer, true, layerIndicesToParameters ); + } + else if ( Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR ) + { + const Vector optimalCoeffs = OptimalParameterisedSymbolicBoundTightening(); + Map> layerIndicesToParameters = + getParametersForLayers( optimalCoeffs ); + const Vector &polygonalTightenings = + OptimizeParameterisedPolygonalTightening(); + lpFormulator.optimizeBoundsWithLpRelaxation( + _layerIndexToLayer, false, layerIndicesToParameters, polygonalTightenings ); + lpFormulator.optimizeBoundsWithLpRelaxation( + _layerIndexToLayer, true, layerIndicesToParameters, polygonalTightenings ); + } else if ( Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::LP_RELAXATION ) lpFormulator.optimizeBoundsWithLpRelaxation( _layerIndexToLayer ); @@ -670,6 +864,243 @@ double NetworkLevelReasoner::getPreviousBias( const ReluConstraint *reluConstrai return _previousBiases[reluConstraint]; } +Vector NetworkLevelReasoner::getOutputSymbolicLb( unsigned layerIndex ) +{ + // Initialize map if empty. + if ( _outputSymbolicLb.empty() ) + { + parameterisedDeepPoly( true ); + } + + if ( !_outputSymbolicLb.exists( layerIndex ) ) + { + throw NLRError( NLRError::LAYER_NOT_FOUND, + "Layer not found in output layer symbolic bounds map." ); + } + + return _outputSymbolicLb[layerIndex]; +} + +Vector NetworkLevelReasoner::getOutputSymbolicUb( unsigned layerIndex ) +{ + // Initialize map if empty. + if ( _outputSymbolicUb.empty() ) + { + parameterisedDeepPoly( true ); + } + + if ( !_outputSymbolicUb.exists( layerIndex ) ) + { + throw NLRError( NLRError::LAYER_NOT_FOUND, + "Layer not found in output layer symbolic bounds map." ); + } + + return _outputSymbolicUb[layerIndex]; +} + +Vector NetworkLevelReasoner::getOutputSymbolicLowerBias( unsigned layerIndex ) +{ + // Initialize map if empty. + if ( _outputSymbolicLowerBias.empty() ) + { + parameterisedDeepPoly( true ); + } + + if ( !_outputSymbolicLowerBias.exists( layerIndex ) ) + { + throw NLRError( NLRError::LAYER_NOT_FOUND, + "Layer not found in output layer symbolic bounds map." ); + } + + return _outputSymbolicLowerBias[layerIndex]; +} + +Vector NetworkLevelReasoner::getOutputSymbolicUpperBias( unsigned layerIndex ) +{ + // Initialize map if empty. + if ( _outputSymbolicUpperBias.empty() ) + { + parameterisedDeepPoly( true ); + } + + if ( !_outputSymbolicUpperBias.exists( layerIndex ) ) + { + throw NLRError( NLRError::LAYER_NOT_FOUND, + "Layer not found in output layer symbolic bounds map." 
); + } + + return _outputSymbolicUpperBias[layerIndex]; +} + +Vector NetworkLevelReasoner::getPredecessorSymbolicLb( unsigned layerIndex ) +{ + // Initialize map if empty. + if ( _predecessorSymbolicLb.empty() ) + { + parameterisedDeepPoly( true ); + } + + if ( !_predecessorSymbolicLb.exists( layerIndex ) ) + { + throw NLRError( NLRError::LAYER_NOT_FOUND, + "Layer not found in predecessor layer symbolic bounds map." ); + } + + return _predecessorSymbolicLb[layerIndex]; +} + +Vector NetworkLevelReasoner::getPredecessorSymbolicUb( unsigned layerIndex ) +{ + // Initialize map if empty. + if ( _predecessorSymbolicUb.empty() ) + { + parameterisedDeepPoly( true ); + } + + if ( !_predecessorSymbolicUb.exists( layerIndex ) ) + { + throw NLRError( NLRError::LAYER_NOT_FOUND, + "Layer not found in predecessor layer symbolic bounds map." ); + } + + return _predecessorSymbolicUb[layerIndex]; +} + +Vector NetworkLevelReasoner::getPredecessorSymbolicLowerBias( unsigned layerIndex ) +{ + // Initialize map if empty. + if ( _predecessorSymbolicLowerBias.empty() ) + { + parameterisedDeepPoly( true ); + } + + if ( !_predecessorSymbolicLowerBias.exists( layerIndex ) ) + { + throw NLRError( NLRError::LAYER_NOT_FOUND, + "Layer not found in predecessor layer symbolic bounds map." ); + } + + return _predecessorSymbolicLowerBias[layerIndex]; +} + +Vector NetworkLevelReasoner::getPredecessorSymbolicUpperBias( unsigned layerIndex ) +{ + // Initialize map if empty. + if ( _predecessorSymbolicUpperBias.empty() ) + { + parameterisedDeepPoly( true ); + } + + if ( !_predecessorSymbolicUpperBias.exists( layerIndex ) ) + { + throw NLRError( NLRError::LAYER_NOT_FOUND, + "Layer not found in predecessor layer symbolic bounds map." ); + } + + return _predecessorSymbolicUpperBias[layerIndex]; +} + +double NetworkLevelReasoner::getPMNRScore( NeuronIndex index ) +{ + // Initialize map if empty. + if ( _neuronToPMNRScores.empty() ) + { + initializePMNRScoreMap(); + } + + if ( !_neuronToPMNRScores.exists( index ) ) + { + throw NLRError( NLRError::NEURON_NOT_FOUND, "Neuron not found in PMNR scores map." ); + } + + return _neuronToPMNRScores[index]; +} + +std::pair NetworkLevelReasoner::getBBPSBranchingPoint( NeuronIndex index ) +{ + // Initialize map if empty. + if ( _neuronToBBPSBranchingPoints.empty() ) + { + initializeBBPSBranchingMaps(); + } + + if ( !_neuronToBBPSBranchingPoints.exists( index ) ) + { + throw NLRError( NLRError::NEURON_NOT_FOUND, + "Neuron not found in BBPS branching points map." ); + } + + return _neuronToBBPSBranchingPoints[index]; +} + +Vector NetworkLevelReasoner::getSymbolicLbPerBranch( NeuronIndex index ) +{ + // Initialize map if empty. + if ( _neuronToSymbolicLbPerBranch.empty() ) + { + initializeBBPSBranchingMaps(); + } + + if ( !_neuronToSymbolicLbPerBranch.exists( index ) ) + { + throw NLRError( NLRError::NEURON_NOT_FOUND, + "Neuron not found in BBPS branch symbolic bounds map." ); + } + + return _neuronToSymbolicLbPerBranch[index]; +} + +Vector NetworkLevelReasoner::getSymbolicUbPerBranch( NeuronIndex index ) +{ + // Initialize map if empty. + if ( _neuronToSymbolicUbPerBranch.empty() ) + { + initializeBBPSBranchingMaps(); + } + + if ( !_neuronToSymbolicUbPerBranch.exists( index ) ) + { + throw NLRError( NLRError::NEURON_NOT_FOUND, + "Neuron not found in BBPS branch symbolic bounds map." ); + } + + return _neuronToSymbolicUbPerBranch[index]; +} + +Vector NetworkLevelReasoner::getSymbolicLowerBiasPerBranch( NeuronIndex index ) +{ + // Initialize map if empty. 
+    if ( _neuronToSymbolicLowerBiasPerBranch.empty() )
+    {
+        initializeBBPSBranchingMaps();
+    }
+
+    if ( !_neuronToSymbolicLowerBiasPerBranch.exists( index ) )
+    {
+        throw NLRError( NLRError::NEURON_NOT_FOUND,
+                        "Neuron not found in BBPS branch symbolic bounds map." );
+    }
+
+    return _neuronToSymbolicLowerBiasPerBranch[index];
+}
+
+Vector NetworkLevelReasoner::getSymbolicUpperBiasPerBranch( NeuronIndex index )
+{
+    // Initialize map if empty.
+    if ( _neuronToSymbolicUpperBiasPerBranch.empty() )
+    {
+        initializeBBPSBranchingMaps();
+    }
+
+    if ( !_neuronToSymbolicUpperBiasPerBranch.exists( index ) )
+    {
+        throw NLRError( NLRError::NEURON_NOT_FOUND,
+                        "Neuron not found in BBPS branch symbolic bounds map." );
+    }
+
+    return _neuronToSymbolicUpperBiasPerBranch[index];
+}
+
 unsigned NetworkLevelReasoner::mergeConsecutiveWSLayers( const Map &lowerBounds,
                                                          const Map &upperBounds,
@@ -754,6 +1185,2197 @@ bool NetworkLevelReasoner::suitableForMerging(
     return true;
 }
 
+const Vector NetworkLevelReasoner::OptimalParameterisedSymbolicBoundTightening()
+{
+    // Search over coeffs in [0, 1]^number_of_parameters with projected gradient descent.
+    unsigned maxIterations =
+        GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS;
+    double stepSize = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE;
+    double epsilon = GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS;
+    double weightDecay = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_WEIGHT_DECAY;
+    double lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE;
+    unsigned dimension = getNumberOfParameters();
+    bool maximize = false;
+    double sign = ( maximize ? 1 : -1 );
+
+    Vector lowerBounds( dimension, 0 );
+    Vector upperBounds( dimension, 1 );
+
+    // Initialize the guess uniformly at random.
+    Vector guess( dimension );
+    std::mt19937_64 rng( GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED );
+    std::uniform_real_distribution dis( 0, 1 );
+    for ( unsigned j = 0; j < dimension; ++j )
+    {
+        double lb = lowerBounds[j];
+        double ub = upperBounds[j];
+        guess[j] = lb + dis( rng ) * ( ub - lb );
+    }
+
+    Vector> candidates( dimension );
+    Vector gradient( dimension );
+
+    for ( unsigned i = 0; i < maxIterations; ++i )
+    {
+        double currentCost = EstimateVolume( guess );
+        for ( unsigned j = 0; j < dimension; ++j )
+        {
+            candidates[j] = Vector( guess );
+            candidates[j][j] += stepSize;
+
+            if ( candidates[j][j] > upperBounds[j] || candidates[j][j] < lowerBounds[j] )
+            {
+                gradient[j] = 0;
+                continue;
+            }
+
+            double cost = EstimateVolume( candidates[j] );
+            gradient[j] = ( cost - currentCost ) / stepSize + weightDecay * guess[j];
+        }
+
+        bool gradientIsZero = true;
+        for ( unsigned j = 0; j < dimension; ++j )
+        {
+            if ( FloatUtils::abs( gradient[j] ) > epsilon )
+            {
+                gradientIsZero = false;
+            }
+        }
+        if ( gradientIsZero )
+        {
+            break;
+        }
+
+        for ( unsigned j = 0; j < dimension; ++j )
+        {
+            guess[j] += sign * lr * gradient[j];
+
+            guess[j] = std::min( guess[j], upperBounds[j] );
+            guess[j] = std::max( guess[j], lowerBounds[j] );
+        }
+    }
+
+    const Vector optimalCoeffs( guess );
+    return optimalCoeffs;
+}
+
+double NetworkLevelReasoner::EstimateVolume( const Vector &coeffs )
+{
+    // First, run parameterised symbolic bound propagation.
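+    // The estimate is Monte Carlo: points are sampled uniformly from the input bounding box,
+    // each point is scored by the sigmoid of its largest gap between the concrete output
+    // bounds and the symbolic bounds evaluated at that point, and the average score is
+    // scaled by the volume of the bounding box.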
+ Map> layerIndicesToParameters = getParametersForLayers( coeffs ); + for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) + { + ASSERT( _layerIndexToLayer.exists( i ) ); + const Vector ¤tLayerCoeffs = layerIndicesToParameters[i]; + _layerIndexToLayer[i]->computeParameterisedSymbolicBounds( currentLayerCoeffs ); + } + + std::mt19937_64 rng( GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED ); + double logBoxVolume = 0; + double sigmoidSum = 0; + + unsigned inputLayerIndex = 0; + unsigned outputLayerIndex = _layerIndexToLayer.size() - 1; + Layer *inputLayer = _layerIndexToLayer[inputLayerIndex]; + Layer *outputLayer = _layerIndexToLayer[outputLayerIndex]; + + // Calculate volume of input variables' bounding box. + for ( unsigned index = 0; index < inputLayer->getSize(); ++index ) + { + if ( inputLayer->neuronEliminated( index ) ) + continue; + + double lb = inputLayer->getLb( index ); + double ub = inputLayer->getUb( index ); + + if ( lb == ub ) + continue; + + logBoxVolume += std::log( ub - lb ); + } + + for ( unsigned i = 0; i < GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; ++i ) + { + // Sample input point from known bounds. + Map point; + for ( unsigned j = 0; j < inputLayer->getSize(); ++j ) + { + if ( inputLayer->neuronEliminated( j ) ) + { + point.insert( j, 0 ); + } + else + { + double lb = inputLayer->getLb( j ); + double ub = inputLayer->getUb( j ); + std::uniform_real_distribution<> dis( lb, ub ); + point.insert( j, dis( rng ) ); + } + } + + // Calculate sigmoid of maximum margin from output symbolic bounds. + double maxMargin = 0; + for ( unsigned j = 0; j < outputLayer->getSize(); ++j ) + { + if ( outputLayer->neuronEliminated( j ) ) + continue; + + double margin = calculateDifferenceFromSymbolic( outputLayer, point, j ); + maxMargin = std::max( maxMargin, margin ); + } + sigmoidSum += SigmoidConstraint::sigmoid( maxMargin ); + } + + return std::exp( logBoxVolume + std::log( sigmoidSum ) ) / + GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; +} + +double NetworkLevelReasoner::calculateDifferenceFromSymbolic( const Layer *layer, + Map &point, + unsigned i ) const +{ + unsigned size = layer->getSize(); + unsigned inputLayerSize = _layerIndexToLayer[0]->getSize(); + double lowerSum = layer->getSymbolicLowerBias()[i]; + double upperSum = layer->getSymbolicUpperBias()[i]; + + for ( unsigned j = 0; j < inputLayerSize; ++j ) + { + lowerSum += layer->getSymbolicLb()[j * size + i] * point[j]; + upperSum += layer->getSymbolicUb()[j * size + i] * point[j]; + } + + return std::max( layer->getUb( i ) - upperSum, lowerSum - layer->getLb( i ) ); +} + +const Vector NetworkLevelReasoner::generatePolygonalTighteningsForPMNR() +{ + Vector tightenings = Vector( {} ); + Vector lowerDeepPolyTightenings = Vector( {} ); + Vector upperDeepPolyTightenings = Vector( {} ); + const Vector neurons = selectPMNRNeurons(); + unsigned neuronCount = neurons.size(); + + // Initial tightenings are non-fixed neurons and their DeepPoly symbolic bounds. 
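+    // For a selected neuron x_f with sources x_i, the lower tightening encodes
+    // x_f - sum_i a_l_i * x_i >= lower bias and the upper tightening encodes
+    // -x_f + sum_i a_u_i * x_i >= -upper bias, where the a_l_i / a_u_i are the DeepPoly
+    // predecessor symbolic coefficients; both are stored as LB-type tightenings.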
+ for ( const auto &pair : neurons ) + { + unsigned layerIndex = pair._layer; + unsigned neuron = pair._neuron; + Layer *layer = _layerIndexToLayer[layerIndex]; + unsigned size = layer->getSize(); + + Map lowerCoeffs; + Map upperCoeffs; + lowerCoeffs[pair] = 1; + upperCoeffs[pair] = -1; + + unsigned inputIndex = 0; + List sources = layer->getActivationSources( neuron ); + for ( const auto &sourceIndex : sources ) + { + lowerCoeffs[sourceIndex] = + -getPredecessorSymbolicLb( layerIndex )[size * inputIndex + neuron]; + upperCoeffs[sourceIndex] = + getPredecessorSymbolicUb( layerIndex )[size * inputIndex + neuron]; + ++inputIndex; + } + PolygonalTightening lowerTightening( lowerCoeffs, + getPredecessorSymbolicLowerBias( layerIndex )[neuron], + PolygonalTightening::LB ); + PolygonalTightening upperTightening( upperCoeffs, + -getPredecessorSymbolicUpperBias( layerIndex )[neuron], + PolygonalTightening::LB ); + lowerDeepPolyTightenings.append( lowerTightening ); + upperDeepPolyTightenings.append( upperTightening ); + } + + /* + If DeepPoly bounds are x_f - \sum a_u_i x_i <= b_u, \sum x_f - a_u_i x_i >= b_l, PMNR + tightenings are linear combinations of x_f - \sum a_u_i x_i, coeffs in {-1, 0, 1}, and + linear combinations of x_f - \sum a_l_i x_i, coeffs in {-1, 0, 1}. + */ + const Vector weights = Vector( { -1, 0, 1 } ); + unsigned weightCount = weights.size(); + unsigned range = std::pow( weightCount, neuronCount ); + for ( unsigned i = 0; i < range; ++i ) + { + // Keep track of whether all coefficients for current tightening are non-negative, + // and count non-zero coefficients. + unsigned nonZeroWeights = 0; + bool allNonnegative = true; + Map lowerCoeffs; + Map upperCoeffs; + for ( unsigned j = 0; j < neuronCount; ++j ) + { + unsigned mask = std::pow( weightCount, j ); + unsigned flag = ( i / mask ) % weightCount; + double weight = weights[flag]; + if ( weight < 0 ) + { + allNonnegative = false; + } + if ( weight != 0 ) + { + ++nonZeroWeights; + } + + // Compute linear combinations of DeepPoly tightenings. + for ( const auto &pair : lowerDeepPolyTightenings[j]._neuronToCoefficient ) + { + if ( !lowerCoeffs.exists( pair.first ) ) + { + lowerCoeffs[pair.first] = weight * pair.second; + } + else + { + lowerCoeffs[pair.first] = lowerCoeffs[pair.first] + weight * pair.second; + } + } + for ( const auto &pair : upperDeepPolyTightenings[j]._neuronToCoefficient ) + { + if ( !upperCoeffs.exists( pair.first ) ) + { + upperCoeffs[pair.first] = weight * pair.second; + } + else + { + upperCoeffs[pair.first] = upperCoeffs[pair.first] + weight * pair.second; + } + } + } + + // No need to tighten the original DeepPoly bounds. + if ( nonZeroWeights <= 1 ) + { + continue; + } + + // Calculate initial concrete lower bound for all tightenings. If all coefficients are, + // non-negative, compute lower bounds by adding the DeepPoly tightenings' lower bounds. + double lowerTighteningLb = 0; + double upperTighteningLb = 0; + if ( allNonnegative ) + { + for ( unsigned j = 0; j < neuronCount; ++j ) + { + unsigned mask = std::pow( weightCount, j ); + unsigned flag = ( i / mask ) % weightCount; + double weight = weights[flag]; + lowerTighteningLb += weight * lowerDeepPolyTightenings[j]._value; + upperTighteningLb += weight * upperDeepPolyTightenings[j]._value; + } + } + + // If some weights are negative, compute lower bounds by concretizing. 
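+        // Each term c * x is bounded from below by c * lb( x ) when c >= 0,
+        // and by c * ub( x ) otherwise.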
+ else + { + for ( const auto &pair : lowerCoeffs ) + { + double neuronLb = + _layerIndexToLayer[pair.first._layer]->getLb( pair.first._neuron ); + double neuronUb = + _layerIndexToLayer[pair.first._layer]->getUb( pair.first._neuron ); + lowerTighteningLb += + pair.second >= 0 ? pair.second * neuronLb : pair.second * neuronUb; + } + for ( const auto &pair : upperCoeffs ) + { + double neuronLb = + _layerIndexToLayer[pair.first._layer]->getLb( pair.first._neuron ); + double neuronUb = + _layerIndexToLayer[pair.first._layer]->getUb( pair.first._neuron ); + upperTighteningLb += + pair.second >= 0 ? pair.second * neuronLb : pair.second * neuronUb; + } + } + PolygonalTightening lowerTightening( + lowerCoeffs, lowerTighteningLb, PolygonalTightening::LB ); + PolygonalTightening upperTightening( + upperCoeffs, upperTighteningLb, PolygonalTightening::LB ); + tightenings.append( lowerTightening ); + tightenings.append( upperTightening ); + } + + const Vector tighteningsVector = + Vector( tightenings ); + return tighteningsVector; +} + +const Vector NetworkLevelReasoner::selectPMNRNeurons() +{ + // Select layer with maximal PMNR neuron score sum. + const Vector &candidateLayers = getLayersWithNonfixedNeurons(); + if ( candidateLayers.empty() ) + { + const Vector emptyVector( {} ); + return emptyVector; + } + + double maxScore = 0; + unsigned maxScoreIndex = 0; + for ( const auto &layerIndex : candidateLayers ) + { + double layerScore = 0; + Layer *layer = _layerIndexToLayer[layerIndex]; + for ( const auto &index : layer->getNonfixedNeurons() ) + { + double neuronScore = getPMNRScore( index ); + layerScore += neuronScore; + } + + if ( layerScore > maxScore ) + { + maxScore = layerScore; + maxScoreIndex = layerIndex; + } + } + + // Extract highest score neurons from this layer. + Layer *layer = _layerIndexToLayer[maxScoreIndex]; + std::priority_queue, + std::vector>, + std::less>> + maxQueue; + const Vector nonfixedNeurons = layer->getNonfixedNeurons(); + for ( const auto &index : nonfixedNeurons ) + { + maxQueue.push( std::pair( getPMNRScore( index ), index._neuron ) ); + } + + unsigned neuronCount = + std::min( GlobalConfiguration::PMNR_SELECTED_NEURONS, nonfixedNeurons.size() ); + Vector selectedNeurons = Vector( neuronCount ); + for ( unsigned i = 0; i < neuronCount; ++i ) + { + selectedNeurons[i] = NeuronIndex( maxScoreIndex, maxQueue.top().second ); + maxQueue.pop(); + } + const Vector neurons( selectedNeurons ); + return neurons; +} + +const Vector NetworkLevelReasoner::OptimizeParameterisedPolygonalTightening() +{ + // Calculate successor layers, PMNR scores, symbolic bound maps before optimizing. + computeSuccessorLayers(); + parameterisedDeepPoly( true ); + initializePMNRScoreMap(); + + // Repeatedly optimize polygonal tightenings given previously optimized ones. + const Vector &selectedTightenings = generatePolygonalTighteningsForPMNR(); + Vector optimizedTightenings = Vector( {} ); + for ( unsigned i = 0; i < selectedTightenings.size(); ++i ) + { + PolygonalTightening tightening = selectedTightenings[i]; + bool maximize = ( tightening._type == PolygonalTightening::LB ); + double feasibiltyBound = maximize ? FloatUtils::infinity() : FloatUtils::negativeInfinity(); + double bound = OptimizeSingleParameterisedPolygonalTightening( + tightening, optimizedTightenings, maximize, feasibiltyBound ); + + // Attempt to obtain stronger bound by branching selected neurons. 
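+        // A bound that holds in every branch holds globally, so the weakest bound over all
+        // feasible branch combinations is used, and it replaces the unbranched bound only
+        // when it is stronger.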
+ tightening._value = OptimizeSingleParameterisedPolygonalTighteningWithBranching( + tightening, optimizedTightenings, maximize, bound ); + optimizedTightenings.append( tightening ); + + // Store optimized tightenings in NLR. + receivePolygonalTightening( tightening ); + } + + const Vector tightenings( optimizedTightenings ); + return tightenings; +} + +double NetworkLevelReasoner::OptimizeSingleParameterisedPolygonalTighteningWithBranching( + PolygonalTightening &tightening, + Vector &prevTightenings, + bool maximize, + double originalBound ) +{ + // Determine which of the selected neurons support branching. + const Vector selectedNeurons = selectPMNRNeurons(); + Vector neurons = Vector( {} ); + for ( const auto &index : selectedNeurons ) + { + const Layer *layer = _layerIndexToLayer[index._layer]; + if ( layer->neuronNonfixed( index._neuron ) && + supportsInvpropBranching( layer->getLayerType() ) ) + { + neurons.append( index ); + } + } + if ( neurons.empty() ) + return originalBound; + + // Re-optimize current tightening for every branch combination. If we seek to maximize + // the tightening's bound, select minimal score of all combinations, and vice versa. + bool maximizeBranchBound = !maximize; + double newBound = maximizeBranchBound ? FloatUtils::negativeInfinity() : FloatUtils::infinity(); + unsigned neuronCount = neurons.size(); + Vector branchCounts( neuronCount, 0 ); + for ( unsigned i = 0; i < neuronCount; ++i ) + { + branchCounts[i] = getSymbolicLbPerBranch( neurons[i] ).size(); + } + unsigned range = + std::accumulate( branchCounts.begin(), branchCounts.end(), 1, std::multiplies() ); + for ( unsigned i = 0; i < range; ++i ) + { + Map neuronToBranchIndex; + for ( unsigned j = 0; j < neuronCount; ++j ) + { + unsigned mask = std::accumulate( branchCounts.begin(), + std::next( branchCounts.begin(), j ), + 1, + std::multiplies() ); + unsigned branchIndex = ( i / mask ) % branchCounts[j]; + NeuronIndex index = neurons[j]; + neuronToBranchIndex.insert( index, branchIndex ); + } + + // To determine some of the infeasible branch combinations, calculate a feasibility bound + // (known upper/lower bound for max/min problem) with concretization. + double feasibilityBound = 0; + for ( const auto &pair : tightening._neuronToCoefficient ) + { + double ub = _layerIndexToLayer[pair.first._layer]->getUb( pair.first._neuron ); + double lb = _layerIndexToLayer[pair.first._layer]->getLb( pair.first._neuron ); + if ( maximize ) + { + feasibilityBound += pair.second > 0 ? pair.second * ub : pair.second * lb; + } + else + { + feasibilityBound += pair.second > 0 ? pair.second * lb : pair.second * ub; + } + } + + double branchBound = OptimizeSingleParameterisedPolygonalTightening( + tightening, prevTightenings, maximize, feasibilityBound, neuronToBranchIndex ); + + // If bound is stronger than known feasibility bound, store branch combination in NLR. + if ( !FloatUtils::isFinite( branchBound ) || maximize ? branchBound > feasibilityBound + : branchBound < feasibilityBound ) + { + receiveInfeasibleBranches( neuronToBranchIndex ); + } + else + { + newBound = maximizeBranchBound ? std::max( branchBound, newBound ) + : std::min( branchBound, newBound ); + } + } + + newBound = maximize ? 
std::max( originalBound, newBound ) : std::min( originalBound, newBound ); + return newBound; +} + +double NetworkLevelReasoner::OptimizeSingleParameterisedPolygonalTightening( + PolygonalTightening &tightening, + Vector &prevTightenings, + bool maximize, + double feasibilityBound, + const Map &neuronToBranchIndex ) +{ + // Search over gamma in [0, inf)^sizeOfPrevTightenings with PGD. + unsigned maxIterations = GlobalConfiguration::INVPROP_MAX_ITERATIONS; + double gammaStepSize = GlobalConfiguration::INVPROP_STEP_SIZE; + double epsilon = GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS; + double weightDecay = GlobalConfiguration::INVPROP_WEIGHT_DECAY; + double lr = GlobalConfiguration::INVPROP_LEARNING_RATE; + unsigned gammaDimension = prevTightenings.size(); + double sign = ( maximize ? 1 : -1 ); + double bestBound = tightening._value; + + Vector gammaLowerBounds( gammaDimension, 0 ); + + Vector gamma( gammaDimension, GlobalConfiguration::INVPROP_INITIAL_GAMMA ); + Vector previousGamma( gamma ); + + Vector> gammaCandidates( gammaDimension ); + Vector gammaGradient( gammaDimension ); + + for ( unsigned i = 0; i < maxIterations; ++i ) + { + for ( unsigned j = 0; j < gammaDimension; ++j ) + { + gamma[j] += weightDecay * ( gamma[j] - previousGamma[j] ); + gamma[j] = std::max( gamma[j], gammaLowerBounds[j] ); + } + + double currentCost = getParameterisdPolygonalTighteningBound( + gamma, tightening, prevTightenings, neuronToBranchIndex ); + + // If calculated bound is stronger than known feasibility bound, stop optimization. + if ( !FloatUtils::isFinite( currentCost ) || maximize ? currentCost > feasibilityBound + : currentCost < feasibilityBound ) + { + return currentCost; + } + + for ( unsigned j = 0; j < gammaDimension; ++j ) + { + gammaCandidates[j] = Vector( gamma ); + gammaCandidates[j][j] += gammaStepSize; + if ( gammaCandidates[j][j] < gammaLowerBounds[j] ) + { + gammaGradient[j] = 0; + continue; + } + + double cost = getParameterisdPolygonalTighteningBound( + gammaCandidates[j], tightening, prevTightenings, neuronToBranchIndex ); + if ( !FloatUtils::isFinite( cost ) || maximize ? cost > feasibilityBound + : cost < feasibilityBound ) + { + return cost; + } + + gammaGradient[j] = ( cost - currentCost ) / gammaStepSize; + bestBound = ( maximize ? std::max( bestBound, cost ) : std::min( bestBound, cost ) ); + } + + bool gradientIsZero = true; + for ( unsigned j = 0; j < gammaDimension; ++j ) + { + if ( FloatUtils::abs( gammaGradient[j] ) > epsilon ) + { + gradientIsZero = false; + } + } + if ( gradientIsZero ) + { + break; + } + for ( unsigned j = 0; j < gammaDimension; ++j ) + { + previousGamma[j] = gamma[j]; + gamma[j] += sign * lr * gammaGradient[j]; + gamma[j] = std::max( gamma[j], gammaLowerBounds[j] ); + } + } + + return bestBound; +} + +double NetworkLevelReasoner::getParameterisdPolygonalTighteningBound( + const Vector &gamma, + PolygonalTightening &tightening, + Vector &prevTightenings, + const Map &neuronToBranchIndex ) +{ + // Recursively compute vectors mu, muHat for every layer with the backpropagation procedure. + unsigned numLayers = _layerIndexToLayer.size(); + unsigned maxLayer = _layerIndexToLayer.size() - 1; + unsigned prevTigheningsCount = prevTightenings.size(); + unsigned inputLayerSize = _layerIndexToLayer[0]->getSize(); + double sign = ( tightening._type == PolygonalTightening::LB ? 
1 : -1 ); + Vector> mu( numLayers ); + Vector> muHat( numLayers ); + + for ( unsigned layerIndex = numLayers; layerIndex-- > 0; ) + { + Layer *layer = _layerIndexToLayer[layerIndex]; + unsigned size = layer->getSize(); + mu[layerIndex] = Vector( size, 0 ); + muHat[layerIndex] = Vector( size, 0 ); + + if ( layerIndex < maxLayer ) + { + for ( unsigned i = 0; i < size; ++i ) + { + for ( unsigned successorIndex : layer->getSuccessorLayers() ) + { + const Layer *successorLayer = _layerIndexToLayer[successorIndex]; + unsigned successorSize = successorLayer->getSize(); + + if ( successorLayer->getLayerType() == Layer::WEIGHTED_SUM ) + { + const double *weights = successorLayer->getWeightMatrix( layerIndex ); + for ( unsigned j = 0; j < successorSize; ++j ) + { + if ( !successorLayer->neuronEliminated( j ) ) + { + muHat[layerIndex][i] += + mu[successorIndex][j] * weights[i * successorSize + j]; + } + } + } + else + { + for ( unsigned j = 0; j < successorSize; ++j ) + { + // Find the index of the current neuron in the successor's activation + // sources list. + bool found = false; + unsigned inputIndex = 0; + List sources = successorLayer->getActivationSources( j ); + for ( const auto &sourceIndex : sources ) + { + if ( sourceIndex._layer == layerIndex && sourceIndex._neuron == i ) + { + found = true; + break; + } + ++inputIndex; + } + NeuronIndex successor( successorIndex, j ); + if ( found ) + { + if ( !successorLayer->neuronEliminated( j ) ) + { + // When branching selected neurons, use predecessor symbolic + // bounds for current branch. + if ( neuronToBranchIndex.exists( successor ) ) + { + muHat[layerIndex][i] += + std::max( mu[successorIndex][j], 0.0 ) * + getSymbolicUbPerBranch( + successor )[neuronToBranchIndex[successor]]; + + muHat[layerIndex][i] -= + std::max( -mu[successorIndex][j], 0.0 ) * + getSymbolicLbPerBranch( + successor )[neuronToBranchIndex[successor]]; + } + else + { + muHat[layerIndex][i] += + std::max( mu[successorIndex][j], 0.0 ) * + getPredecessorSymbolicUb( + successorIndex )[successorSize * inputIndex + j]; + + muHat[layerIndex][i] -= + std::max( -mu[successorIndex][j], 0.0 ) * + getPredecessorSymbolicLb( + successorIndex )[successorSize * inputIndex + j]; + } + } + } + } + } + } + } + } + + if ( layerIndex > 0 ) + { + // Compute mu from muHat. + for ( unsigned i = 0; i < size; ++i ) + { + mu[layerIndex][i] += muHat[layerIndex][i] - + sign * tightening.getCoeff( NeuronIndex( layerIndex, i ) ); + for ( unsigned j = 0; j < prevTigheningsCount; ++j ) + { + PolygonalTightening pt = prevTightenings[j]; + double prevCoeff = pt.getCoeff( NeuronIndex( layerIndex, i ) ); + double currentSign = ( pt._type == PolygonalTightening::LB ? 1 : -1 ); + mu[layerIndex][i] += currentSign * gamma[j] * prevCoeff; + } + } + } + } + + // Compute global bound for input space minimization problem. + Vector inputLayerBound( inputLayerSize, 0 ); + for ( unsigned i = 0; i < inputLayerSize; ++i ) + { + inputLayerBound[i] += sign * tightening.getCoeff( NeuronIndex( 0, i ) ) - muHat[0][i]; + for ( unsigned j = 0; j < prevTigheningsCount; ++j ) + { + PolygonalTightening pt = prevTightenings[j]; + double prevCoeff = pt.getCoeff( NeuronIndex( 0, i ) ); + double currentSign = ( pt._type == PolygonalTightening::LB ? 1 : -1 ); + inputLayerBound[i] -= currentSign * gamma[j] * prevCoeff; + } + } + + // Compute bound for polygonal tightening bias using mu and inputLayerBound. 
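+    // The constant term accumulates the gamma-weighted constants of the previous tightenings,
+    // the biases of weighted-sum layers and of the activation relaxations selected by the sign
+    // of mu, and finally concretizes the remaining input-layer coefficients over the input box.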
+ double bound = 0; + for ( unsigned i = 0; i < prevTigheningsCount; ++i ) + { + PolygonalTightening pt = prevTightenings[i]; + double currentSign = ( pt._type == PolygonalTightening::LB ? 1 : -1 ); + bound += currentSign * gamma[i] * pt._value; + } + + for ( unsigned layerIndex = maxLayer; layerIndex >= 1; --layerIndex ) + { + Layer *layer = _layerIndexToLayer[layerIndex]; + if ( layer->getLayerType() == Layer::WEIGHTED_SUM ) + { + const double *biases = layer->getBiases(); + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + bound -= mu[layerIndex][i] * biases[i]; + } + else + { + bound -= mu[layerIndex][i] * layer->getEliminatedNeuronValue( i ); + } + } + } + else + { + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + NeuronIndex index( layerIndex, i ); + if ( neuronToBranchIndex.exists( index ) ) + { + bound -= std::max( mu[layerIndex][i], 0.0 ) * + getSymbolicUpperBiasPerBranch( index )[neuronToBranchIndex[index]]; + bound += std::max( -mu[layerIndex][i], 0.0 ) * + getSymbolicLowerBiasPerBranch( index )[neuronToBranchIndex[index]]; + } + else + { + bound -= std::max( mu[layerIndex][i], 0.0 ) * + getPredecessorSymbolicUpperBias( layerIndex )[i]; + bound += std::max( -mu[layerIndex][i], 0.0 ) * + getPredecessorSymbolicLowerBias( layerIndex )[i]; + } + } + else + { + bound -= + FloatUtils::abs( mu[layerIndex][i] ) * layer->getEliminatedNeuronValue( i ); + } + } + } + } + + Layer *inputLayer = _layerIndexToLayer[0]; + for ( unsigned i = 0; i < inputLayerSize; ++i ) + { + bound += std::max( inputLayerBound[i], 0.0 ) * inputLayer->getLb( i ); + bound -= std::max( -inputLayerBound[i], 0.0 ) * inputLayer->getUb( i ); + } + return sign * bound; +} + +void NetworkLevelReasoner::initializePMNRScoreMap() +{ + // Clear PMNR score map. + _neuronToPMNRScores.clear(); + for ( const auto &pair : _layerIndexToLayer ) + { + for ( const auto &index : pair.second->getNonfixedNeurons() ) + { + _neuronToPMNRScores.insert( index, calculatePMNRBBPSScore( index ) ); + } + } +} + +double NetworkLevelReasoner::calculatePMNRBBPSScore( NeuronIndex index ) +{ + // Initialize BBPS branching points and branch symbolic bound maps. + initializeBBPSBranchingMaps(); + + Layer *outputLayer = _layerIndexToLayer[getNumberOfLayers() - 1]; + unsigned outputLayerSize = outputLayer->getSize(); + unsigned layerIndex = index._layer; + unsigned neuron = index._neuron; + Layer *layer = _layerIndexToLayer[layerIndex]; + + // We have the symbolic bounds map of the output layer in terms of the given neuron's layer. + // Concretize all neurons except from the given neuron. 
+ Vector concretizedOutputSymbolicLb( outputLayerSize, 0 ); + Vector concretizedOutputSymbolicUb( outputLayerSize, 0 ); + Vector concretizedOutputSymbolicLowerBias( outputLayerSize, 0 ); + Vector concretizedOutputSymbolicUpperBias( outputLayerSize, 0 ); + for ( unsigned i = 0; i < outputLayerSize; ++i ) + { + concretizedOutputSymbolicLb[i] = + getOutputSymbolicLb( layerIndex )[outputLayerSize * neuron + i]; + concretizedOutputSymbolicUb[i] = + getOutputSymbolicUb( layerIndex )[outputLayerSize * neuron + i]; + concretizedOutputSymbolicLowerBias[i] = getOutputSymbolicLowerBias( layerIndex )[i]; + concretizedOutputSymbolicUpperBias[i] = getOutputSymbolicUpperBias( layerIndex )[i]; + + for ( unsigned j = 0; j < layer->getSize(); ++j ) + { + if ( j != neuron ) + { + double lowerWeight = getOutputSymbolicLb( layerIndex )[outputLayerSize * j + i]; + double upperWeight = getOutputSymbolicUb( layerIndex )[outputLayerSize * j + i]; + concretizedOutputSymbolicLowerBias[i] += lowerWeight > 0 + ? lowerWeight * layer->getLb( j ) + : lowerWeight * layer->getUb( j ); + concretizedOutputSymbolicUpperBias[i] += upperWeight > 0 + ? upperWeight * layer->getUb( j ) + : upperWeight * layer->getLb( j ); + } + } + } + + // For every branch, we calculated the output layer's symbolic bounds in terms of the given + // neuron, and branch symbolic bounds of this neuron in terms of one source neuron. + std::pair point = getBBPSBranchingPoint( index ); + NeuronIndex sourceIndex = point.first; + const Layer *sourceLayer = getLayer( sourceIndex._layer ); + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + + Vector symbolicLbPerBranch = getSymbolicLbPerBranch( index ); + Vector symbolicUbPerBranch = getSymbolicUbPerBranch( index ); + Vector symbolicLowerBiasPerBranch = getSymbolicLowerBiasPerBranch( index ); + Vector symbolicUpperBiasPerBranch = getSymbolicUpperBiasPerBranch( index ); + + unsigned branchCount = symbolicLbPerBranch.size(); + ASSERT( symbolicUbPerBranch.size() == branchCount ); + ASSERT( symbolicLowerBiasPerBranch.size() == branchCount ); + ASSERT( symbolicUpperBiasPerBranch.size() == branchCount ); + + // For every output neuron, substitute branch symbolic bounds in the output symbolic bounds, + // sum over all branches and output bounds. 
+ double sourceSymbolicLb = 0; + double sourceSymbolicUb = 0; + double sourceSymbolicLowerBias = 0; + double sourceSymbolicUpperBias = 0; + for ( unsigned i = 0; i < outputLayerSize; ++i ) + { + for ( unsigned j = 0; j < branchCount; ++j ) + { + sourceSymbolicLowerBias += concretizedOutputSymbolicLowerBias[i]; + sourceSymbolicUpperBias += concretizedOutputSymbolicUpperBias[i]; + + if ( concretizedOutputSymbolicLb[i] > 0 ) + { + sourceSymbolicLb += concretizedOutputSymbolicLb[i] * symbolicLbPerBranch[j]; + sourceSymbolicLowerBias += + concretizedOutputSymbolicLb[i] * symbolicLowerBiasPerBranch[j]; + } + else + { + sourceSymbolicLb += concretizedOutputSymbolicLb[i] * symbolicUbPerBranch[j]; + sourceSymbolicLowerBias += + concretizedOutputSymbolicLb[i] * symbolicUpperBiasPerBranch[j]; + } + + if ( concretizedOutputSymbolicUb[i] > 0 ) + { + sourceSymbolicUb += concretizedOutputSymbolicUb[i] * symbolicUbPerBranch[j]; + sourceSymbolicUpperBias += + concretizedOutputSymbolicUb[i] * symbolicUpperBiasPerBranch[j]; + } + else + { + sourceSymbolicUb += concretizedOutputSymbolicUb[i] * symbolicLbPerBranch[j]; + sourceSymbolicUpperBias += + concretizedOutputSymbolicUb[i] * symbolicLowerBiasPerBranch[j]; + } + } + } + + // Concretize the source neuron to get upper and lower bounds for the symbolic expression. + // The neuron's final score is the range of the concrete bounds divided by the branch count. + double scoreLower = sourceSymbolicLb > 0 + ? sourceSymbolicLb * sourceLb + sourceSymbolicLowerBias + : sourceSymbolicLb * sourceUb + sourceSymbolicLowerBias; + double scoreUpper = sourceSymbolicUb > 0 + ? sourceSymbolicUb * sourceUb + sourceSymbolicUpperBias + : sourceSymbolicUb * sourceLb + sourceSymbolicUpperBias; + + return ( scoreUpper - scoreLower ) / branchCount; +} + +void NetworkLevelReasoner::initializeBBPSBranchingMaps() +{ + // Clear BBPS branching points and branch symbolic bound maps. + _neuronToBBPSBranchingPoints.clear(); + _neuronToSymbolicLbPerBranch.clear(); + _neuronToSymbolicUbPerBranch.clear(); + _neuronToSymbolicLowerBiasPerBranch.clear(); + _neuronToSymbolicUpperBiasPerBranch.clear(); + + // Calculate branching points, symbolic bounds for non-fixed neurons which support branching. 
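+    // Each branching point splits the source interval [sourceLb, sourceUb] into two
+    // sub-intervals, and linear bounds of the activation are computed and cached separately
+    // for each sub-interval.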
+ for ( const auto &pair : _layerIndexToLayer ) + { + const Layer *layer = pair.second; + for ( const auto &index : layer->getNonfixedNeurons() ) + { + if ( supportsInvpropBranching( layer->getLayerType() ) ) + { + std::pair point = calculateBranchingPoint( index ); + _neuronToBBPSBranchingPoints.insert( index, point ); + + NeuronIndex sourceIndex = point.first; + double value = point.second; + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + const Vector values = Vector( { sourceLb, value, sourceUb } ); + + unsigned branchCount = values.size() - 1; + Vector symbolicLbPerBranch = Vector( branchCount, 0 ); + Vector symbolicUbPerBranch = Vector( branchCount, 0 ); + Vector symbolicLowerBiasPerBranch = Vector( branchCount, 0 ); + Vector symbolicUpperBiasPerBranch = Vector( branchCount, 0 ); + + calculateSymbolicBoundsPerBranch( index, + sourceIndex, + values, + symbolicLbPerBranch, + symbolicUbPerBranch, + symbolicLowerBiasPerBranch, + symbolicUpperBiasPerBranch, + branchCount ); + + _neuronToSymbolicLbPerBranch.insert( index, symbolicLbPerBranch ); + _neuronToSymbolicUbPerBranch.insert( index, symbolicUbPerBranch ); + _neuronToSymbolicLowerBiasPerBranch.insert( index, symbolicLowerBiasPerBranch ); + _neuronToSymbolicUpperBiasPerBranch.insert( index, symbolicUpperBiasPerBranch ); + } + } + } +} + +const std::pair +NetworkLevelReasoner::calculateBranchingPoint( NeuronIndex index ) const +{ + const Layer *layer = _layerIndexToLayer[index._layer]; + unsigned neuron = index._neuron; + ASSERT( layer->neuronNonfixed( neuron ) ); + std::pair point; + + // Heuristically generate candidates for branching points. + Vector> candidates = + generateBranchingPointCandidates( layer, neuron ); + unsigned numberOfCandidates = candidates.size(); + + if ( numberOfCandidates == 1 ) + { + point = candidates[0]; + } + else + { + Vector scores( numberOfCandidates, 0 ); + double minScore = FloatUtils::infinity(); + unsigned minScoreIndex = 0; + for ( unsigned i = 0; i < numberOfCandidates; ++i ) + { + // Calculate branch symbolic bounds for every candidate. + NeuronIndex sourceIndex = candidates[i].first; + double value = candidates[i].second; + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + const Vector values = Vector( { sourceLb, value, sourceUb } ); + + unsigned branchCount = values.size() - 1; + Vector symbolicLbPerBranch = Vector( branchCount, 0 ); + Vector symbolicUbPerBranch = Vector( branchCount, 0 ); + Vector symbolicLowerBiasPerBranch = Vector( branchCount, 0 ); + Vector symbolicUpperBiasPerBranch = Vector( branchCount, 0 ); + calculateSymbolicBoundsPerBranch( index, + sourceIndex, + values, + symbolicLbPerBranch, + symbolicUbPerBranch, + symbolicLowerBiasPerBranch, + symbolicUpperBiasPerBranch, + branchCount ); + + // Select candidate which minimizes tightening loss. 
+ scores[i] = calculateTighteningLoss( values, + symbolicLbPerBranch, + symbolicUbPerBranch, + symbolicLowerBiasPerBranch, + symbolicUpperBiasPerBranch, + branchCount ); + if ( scores[i] < minScore ) + { + minScore = scores[i]; + minScoreIndex = i; + } + } + point = candidates[minScoreIndex]; + } + + const std::pair branchingPoint( point ); + return branchingPoint; +} + +const Vector> +NetworkLevelReasoner::generateBranchingPointCandidates( const Layer *layer, unsigned i ) const +{ + ASSERT( layer->neuronNonfixed( i ) ); + Layer::Type type = layer->getLayerType(); + + switch ( type ) + { + case Layer::RELU: + case Layer::LEAKY_RELU: + case Layer::SIGN: + case Layer::ABSOLUTE_VALUE: + { + return generateBranchingPointCandidatesAtZero( layer, i ); + break; + } + case Layer::ROUND: + { + return generateBranchingPointCandidatesForRound( layer, i ); + break; + } + case Layer::SIGMOID: + { + return generateBranchingPointCandidatesForSigmoid( layer, i ); + break; + } + case Layer::MAX: + { + return generateBranchingPointCandidatesForMax( layer, i ); + break; + } + case Layer::SOFTMAX: + { + return generateBranchingPointCandidatesForSoftmax( layer, i ); + break; + } + case Layer::BILINEAR: + { + return generateBranchingPointCandidatesForBilinear( layer, i ); + break; + } + default: + { + printf( "Error! Neuron type %u unsupported\n", type ); + throw MarabouError( MarabouError::NETWORK_LEVEL_REASONER_ACTIVATION_NOT_SUPPORTED ); + break; + } + } +} + +const Vector> +NetworkLevelReasoner::generateBranchingPointCandidatesAtZero( const Layer *layer, unsigned i ) const +{ + // A Relu/Sign/Abs/Leaky Relu activation is only branched at zero. + Vector> candidates; + NeuronIndex sourceIndex = *layer->getActivationSources( i ).begin(); + std::pair point( sourceIndex, 0 ); + candidates.append( point ); + const Vector> branchingPointCandidates( candidates ); + return branchingPointCandidates; +} + +const Vector> +NetworkLevelReasoner::generateBranchingPointCandidatesForRound( const Layer *layer, + unsigned i ) const +{ + // For a Round activation, the two candidates are selected as the highest value which + // rounds to the source's lb, and the lowest value which rounds to the source's ub. + Vector> candidates; + NeuronIndex sourceIndex = *layer->getActivationSources( i ).begin(); + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + std::pair pointLower( + sourceIndex, + FloatUtils::round( sourceLb ) + 0.5 - + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + std::pair pointUpper( + sourceIndex, + FloatUtils::round( sourceUb ) - 0.5 + + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + candidates.append( pointLower ); + candidates.append( pointUpper ); + const Vector> branchingPointCandidates( candidates ); + return branchingPointCandidates; +} + +const Vector> +NetworkLevelReasoner::generateBranchingPointCandidatesForSigmoid( const Layer *layer, + unsigned i ) const +{ + // For a Sigmoid activation, sample candidates uniformly in [sourceLb, sourceUb]. 
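A quick worked check of the Round candidate formulas above may help (the Sigmoid candidate generator continues right below): up to the rounding convention at the halfway point, round(lb) + 0.5 - eps sits just below the largest input that still rounds to round(lb), and round(ub) - 0.5 + eps sits just above the smallest input that rounds to round(ub). The snippet is standalone; 1e-9 merely stands in for DEFAULT_EPSILON_FOR_COMPARISONS and std::round for FloatUtils::round.

#include <cmath>
#include <iostream>

int main()
{
    const double eps = 1e-9;  // stands in for DEFAULT_EPSILON_FOR_COMPARISONS
    double sourceLb = -1.2, sourceUb = 2.7;

    // Largest value still rounding to round(sourceLb) = -1, and
    // smallest value rounding to round(sourceUb) = 3, each shifted by eps.
    double pointLower = std::round( sourceLb ) + 0.5 - eps;  // ~ -0.5
    double pointUpper = std::round( sourceUb ) - 0.5 + eps;  // ~  2.5

    std::cout << pointLower << " " << pointUpper << std::endl;
}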
+ Vector> candidates; + NeuronIndex sourceIndex = *layer->getActivationSources( i ).begin(); + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + unsigned numberOfCandidates = GlobalConfiguration::PMNR_BBPS_BRANCHING_CANDIDATES; + for ( unsigned j = 0; j < numberOfCandidates; ++j ) + { + std::pair point( sourceIndex, + sourceLb + ( j + 1 ) * ( sourceUb - sourceLb ) / + ( numberOfCandidates + 1 ) ); + candidates.append( point ); + } + const Vector> branchingPointCandidates( candidates ); + return branchingPointCandidates; +} + +const Vector> +NetworkLevelReasoner::generateBranchingPointCandidatesForMax( const Layer *layer, unsigned i ) const +{ + // For a Max activation, calculate source index of largest lower bound + // and sample candidates uniformly in [sourceLb, sourceUb]. + Vector> candidates; + List sources = layer->getActivationSources( i ); + NeuronIndex indexOfMaxLowerBound = *( sources.begin() ); + double maxLowerBound = FloatUtils::negativeInfinity(); + for ( const auto &sourceIndex : sources ) + { + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + if ( maxLowerBound < sourceLb ) + { + indexOfMaxLowerBound = sourceIndex; + maxLowerBound = sourceLb; + } + } + + const Layer *sourceLayer = _layerIndexToLayer[indexOfMaxLowerBound._layer]; + unsigned sourceNeuron = indexOfMaxLowerBound._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + unsigned numberOfCandidates = GlobalConfiguration::PMNR_BBPS_BRANCHING_CANDIDATES; + for ( unsigned j = 0; j < numberOfCandidates; ++j ) + { + std::pair point( indexOfMaxLowerBound, + sourceLb + ( j + 1 ) * ( sourceUb - sourceLb ) / + ( numberOfCandidates + 1 ) ); + candidates.append( point ); + } + const Vector> branchingPointCandidates( candidates ); + return branchingPointCandidates; +} + +const Vector> +NetworkLevelReasoner::generateBranchingPointCandidatesForSoftmax( const Layer *layer, + unsigned i ) const +{ + // For a Softmax activation, calculate this neuron's source index in the Softmax + // and sample candidates uniformly in [sourceLb, sourceUb]. 
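The Sigmoid and Max candidate generators above both place PMNR_BBPS_BRANCHING_CANDIDATES points uniformly in the open interval (sourceLb, sourceUb); the formula lb + (j + 1)(ub - lb)/(N + 1) deliberately excludes both endpoints, since branching at an endpoint would leave an empty branch. A small standalone sketch of that grid, with illustrative names (the Softmax candidate generator continues right below):

#include <iostream>
#include <vector>

// Return n interior points evenly spaced in (lb, ub), endpoints excluded,
// mirroring the candidate grid used above.
std::vector<double> interiorGrid( double lb, double ub, unsigned n )
{
    std::vector<double> points;
    for ( unsigned j = 0; j < n; ++j )
        points.push_back( lb + ( j + 1 ) * ( ub - lb ) / ( n + 1 ) );
    return points;
}

int main()
{
    for ( double p : interiorGrid( -2.0, 2.0, 3 ) )
        std::cout << p << " ";  // -1 0 1
    std::cout << std::endl;
}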
+ Vector> candidates; + List sources = layer->getActivationSources( i ); + NeuronIndex selfIndex( 0, 0 ); + Set handledInputNeurons; + for ( unsigned j = 0; j < i; ++j ) + { + for ( const auto &sourceIndex : layer->getActivationSources( j ) ) + { + if ( !handledInputNeurons.exists( sourceIndex._neuron ) ) + { + handledInputNeurons.insert( sourceIndex._neuron ); + break; + } + } + } + for ( const auto &sourceIndex : sources ) + { + if ( !handledInputNeurons.exists( sourceIndex._neuron ) ) + { + selfIndex = sourceIndex; + break; + } + } + + const Layer *sourceLayer = _layerIndexToLayer[selfIndex._layer]; + unsigned sourceNeuron = selfIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + unsigned numberOfCandidates = GlobalConfiguration::PMNR_BBPS_BRANCHING_CANDIDATES; + for ( unsigned j = 0; j < numberOfCandidates; ++j ) + { + std::pair point( selfIndex, + sourceLb + ( j + 1 ) * ( sourceUb - sourceLb ) / + ( numberOfCandidates + 1 ) ); + candidates.append( point ); + } + const Vector> branchingPointCandidates( candidates ); + return branchingPointCandidates; +} + +const Vector> +NetworkLevelReasoner::generateBranchingPointCandidatesForBilinear( const Layer *layer, + unsigned i ) const +{ + // For a Bilinear activation, sample candidates uniformly from sources' [sourceLb, sourceUb]. + Vector> candidates; + List sources = layer->getActivationSources( i ); + Vector sourceLbs; + Vector sourceUbs; + Vector sourceNeurons; + Vector sourceLayerIndices; + for ( const auto &sourceIndex : sources ) + { + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + + sourceLayerIndices.append( sourceLayer->getLayerIndex() ); + sourceNeurons.append( sourceNeuron ); + sourceLbs.append( sourceLb ); + sourceUbs.append( sourceUb ); + } + + for ( unsigned j = 0; j < sources.size(); ++j ) + { + unsigned candidatesPerDimension = + GlobalConfiguration::PMNR_BBPS_BRANCHING_CANDIDATES / sources.size(); + for ( unsigned k = 0; k < candidatesPerDimension; ++k ) + { + std::pair point( + NeuronIndex( sourceLayerIndices[j], sourceNeurons[j] ), + sourceLbs[j] + + ( k + 1 ) * ( sourceUbs[j] - sourceLbs[j] ) / ( candidatesPerDimension + 1 ) ); + candidates.append( point ); + } + } + const Vector> branchingPointCandidates( candidates ); + return branchingPointCandidates; +} + +void NetworkLevelReasoner::calculateSymbolicBoundsPerBranch( + NeuronIndex index, + NeuronIndex sourceIndex, + const Vector &values, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch, + unsigned branchCount ) const +{ + ASSERT( symbolicLbPerBranch.size() == branchCount ); + ASSERT( symbolicUbPerBranch.size() == branchCount ); + ASSERT( symbolicLowerBiasPerBranch.size() == branchCount ); + ASSERT( symbolicUpperBiasPerBranch.size() == branchCount ); + ASSERT( values.size() == branchCount + 1 ); + + unsigned layerIndex = index._layer; + Layer *layer = _layerIndexToLayer[layerIndex]; + ASSERT( layer->neuronNonfixed( index._neuron ) ); + Layer::Type type = layer->getLayerType(); + + for ( unsigned i = 0; i < branchCount; ++i ) + { + switch ( type ) + { + case Layer::RELU: + calculateSymbolicBoundsPerBranchForRelu( i, + values[i], + values[i + 1], + symbolicLbPerBranch, + symbolicUbPerBranch, + symbolicLowerBiasPerBranch, + 
symbolicUpperBiasPerBranch ); + break; + + case Layer::ABSOLUTE_VALUE: + calculateSymbolicBoundsPerBranchForAbsoluteValue( i, + values[i], + values[i + 1], + symbolicLbPerBranch, + symbolicUbPerBranch, + symbolicLowerBiasPerBranch, + symbolicUpperBiasPerBranch ); + break; + + case Layer::SIGN: + calculateSymbolicBoundsPerBranchForSign( i, + values[i], + values[i + 1], + symbolicLbPerBranch, + symbolicUbPerBranch, + symbolicLowerBiasPerBranch, + symbolicUpperBiasPerBranch ); + break; + + case Layer::ROUND: + calculateSymbolicBoundsPerBranchForRound( i, + values[i], + values[i + 1], + symbolicLbPerBranch, + symbolicUbPerBranch, + symbolicLowerBiasPerBranch, + symbolicUpperBiasPerBranch ); + break; + + case Layer::SIGMOID: + calculateSymbolicBoundsPerBranchForSigmoid( i, + values[i], + values[i + 1], + symbolicLbPerBranch, + symbolicUbPerBranch, + symbolicLowerBiasPerBranch, + symbolicUpperBiasPerBranch ); + break; + + case Layer::LEAKY_RELU: + calculateSymbolicBoundsPerBranchForLeakyRelu( index, + i, + values[i], + values[i + 1], + symbolicLbPerBranch, + symbolicUbPerBranch, + symbolicLowerBiasPerBranch, + symbolicUpperBiasPerBranch ); + break; + + case Layer::MAX: + calculateSymbolicBoundsPerBranchForMax( index, + sourceIndex, + i, + values[i], + values[i + 1], + symbolicLbPerBranch, + symbolicUbPerBranch, + symbolicLowerBiasPerBranch, + symbolicUpperBiasPerBranch ); + break; + + case Layer::SOFTMAX: + calculateSymbolicBoundsPerBranchForSoftmax( index, + sourceIndex, + i, + values[i], + values[i + 1], + symbolicLbPerBranch, + symbolicUbPerBranch, + symbolicLowerBiasPerBranch, + symbolicUpperBiasPerBranch ); + break; + + case Layer::BILINEAR: + calculateSymbolicBoundsPerBranchForBilinear( index, + sourceIndex, + i, + values[i], + values[i + 1], + symbolicLbPerBranch, + symbolicUbPerBranch, + symbolicLowerBiasPerBranch, + symbolicUpperBiasPerBranch ); + break; + + default: + { + printf( "Error! Neuron type %u unsupported\n", type ); + throw MarabouError( MarabouError::NETWORK_LEVEL_REASONER_ACTIVATION_NOT_SUPPORTED ); + break; + } + } + } +} + +void NetworkLevelReasoner::calculateSymbolicBoundsPerBranchForRelu( + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const +{ + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // Phase active + // Symbolic bound: x_b <= x_f <= x_b + symbolicUbPerBranch[i] = 1; + symbolicUpperBiasPerBranch[i] = 0; + symbolicLbPerBranch[i] = 1; + symbolicLowerBiasPerBranch[i] = 0; + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + // Phase inactive + // Symbolic bound: 0 <= x_f <= 0 + symbolicUbPerBranch[i] = 0; + symbolicUpperBiasPerBranch[i] = 0; + symbolicLbPerBranch[i] = 0; + symbolicLowerBiasPerBranch[i] = 0; + } + else + { + // ReLU not fixed + // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) + double weight = sourceUb / ( sourceUb - sourceLb ); + symbolicUbPerBranch[i] = weight; + symbolicUpperBiasPerBranch[i] = -sourceLb * weight; + + // For the lower bound, in general, x_f >= lambda * x_b, where + // 0 <= lambda <= 1, would be a sound lower bound. We + // use the heuristic described in section 4.1 of + // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + // to set the value of lambda (either 0 or 1 is considered). 
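For an unfixed ReLU with l < 0 < u, the upper bound above is the standard triangle relaxation x_f <= u (x_b - l)/(u - l), and the lower bound follows the DeepPoly heuristic of choosing lambda in {0, 1} by comparing u with -l (continued right below). A quick numeric sanity check with made-up bounds, standalone and outside Marabou's types:

#include <iostream>

int main()
{
    double l = -1.0, u = 3.0;  // unfixed ReLU: l < 0 < u

    // Upper bound x_f <= weight * x_b + bias with weight = u / (u - l), bias = -l * weight.
    double weight = u / ( u - l );  // 0.75
    double bias = -l * weight;      // 0.75

    // The line passes through (l, 0) and (u, u), as the relaxation requires.
    std::cout << weight * l + bias << " " << weight * u + bias << std::endl;  // 0 3

    // DeepPoly-style lower bound: lambda = 1 (x_f >= x_b) when u > -l, else lambda = 0 (x_f >= 0).
    double lambda = ( u > -l ) ? 1.0 : 0.0;
    std::cout << lambda << std::endl;  // 1
}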
+ if ( sourceUb > -sourceLb ) + { + // lambda = 1 + // Symbolic lower bound: x_f >= x_b + symbolicLbPerBranch[i] = 1; + symbolicLowerBiasPerBranch[i] = 0; + } + else + { + // lambda = 1 + // Symbolic lower bound: x_f >= 0 + symbolicLbPerBranch[i] = 0; + symbolicLowerBiasPerBranch[i] = 0; + } + } +} + +void NetworkLevelReasoner::calculateSymbolicBoundsPerBranchForAbsoluteValue( + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const +{ + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // Phase active + // Symbolic bound: x_b <= x_f <= x_b + symbolicUbPerBranch[i] = 1; + symbolicUpperBiasPerBranch[i] = 0; + symbolicLbPerBranch[i] = 1; + symbolicLowerBiasPerBranch[i] = 0; + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + // Phase inactive + // Symbolic bound: -x_b <= x_f <= -x_b + symbolicUbPerBranch[i] = -1; + symbolicUpperBiasPerBranch[i] = 0; + symbolicLbPerBranch[i] = -1; + symbolicLowerBiasPerBranch[i] = 0; + } + else + { + // AbsoluteValue not fixed + // Naive concretization: 0 <= x_f <= max(-lb, ub) + symbolicUbPerBranch[i] = 0; + symbolicUpperBiasPerBranch[i] = FloatUtils::max( -sourceLb, sourceUb ); + + symbolicLbPerBranch[i] = 0; + symbolicLowerBiasPerBranch[i] = 0; + } +} + +void NetworkLevelReasoner::calculateSymbolicBoundsPerBranchForSign( + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const +{ + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // Phase active + // Symbolic bound: 1 <= x_f <= 1 + symbolicUbPerBranch[i] = 0; + symbolicUpperBiasPerBranch[i] = 1; + symbolicLbPerBranch[i] = 0; + symbolicLowerBiasPerBranch[i] = 1; + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + // Phase inactive + // Symbolic bound: -1 <= x_f <= -1 + symbolicUbPerBranch[i] = 0; + symbolicUpperBiasPerBranch[i] = -1; + symbolicLbPerBranch[i] = 0; + symbolicLowerBiasPerBranch[i] = -1; + } + else + { + // Sign not fixed + // Use the relaxation defined in https://arxiv.org/pdf/2011.02948.pdf + // Symbolic upper bound: x_f <= -2 / l * x_b + 1 + symbolicUbPerBranch[i] = -2 / sourceLb; + symbolicUpperBiasPerBranch[i] = 1; + + // Symbolic lower bound: x_f >= (2 / u) * x_b - 1 + symbolicLbPerBranch[i] = 2 / sourceUb; + symbolicLowerBiasPerBranch[i] = -1; + } +} + +void NetworkLevelReasoner::calculateSymbolicBoundsPerBranchForRound( + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const +{ + double sourceUbRound = FloatUtils::round( sourceUb ); + double sourceLbRound = FloatUtils::round( sourceLb ); + + if ( sourceUbRound == sourceLbRound ) + { + symbolicUbPerBranch[i] = 0; + symbolicUpperBiasPerBranch[i] = sourceUbRound; + symbolicLbPerBranch[i] = 0; + symbolicLowerBiasPerBranch[i] = sourceLbRound; + } + else + { + // Round not fixed + // Symbolic upper bound: x_f <= x_b + 0.5 + symbolicUbPerBranch[i] = 1; + symbolicUpperBiasPerBranch[i] = 0.5; + + // Symbolic lower bound: x_f >= x_b - 0.5 + symbolicLbPerBranch[i] = 1; + symbolicLowerBiasPerBranch[i] = -0.5; + } +} + +void NetworkLevelReasoner::calculateSymbolicBoundsPerBranchForSigmoid( + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector 
&symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const +{ + double sourceUbSigmoid = SigmoidConstraint::sigmoid( sourceUb ); + double sourceLbSigmoid = SigmoidConstraint::sigmoid( sourceLb ); + + if ( sourceUb == sourceLb ) + { + symbolicUbPerBranch[i] = 0; + symbolicUpperBiasPerBranch[i] = sourceUbSigmoid; + symbolicLbPerBranch[i] = 0; + symbolicLowerBiasPerBranch[i] = sourceLbSigmoid; + } + else + { + double lambda = ( sourceUbSigmoid - sourceLbSigmoid ) / ( sourceUb - sourceLb ); + double lambdaPrime = std::min( SigmoidConstraint::sigmoidDerivative( sourceLb ), + SigmoidConstraint::sigmoidDerivative( sourceUb ) ); + + // update lower bound + if ( FloatUtils::isPositive( sourceLb ) ) + { + symbolicLbPerBranch[i] = lambda; + symbolicLowerBiasPerBranch[i] = sourceLbSigmoid - lambda * sourceLb; + } + else + { + symbolicLbPerBranch[i] = lambdaPrime; + symbolicLowerBiasPerBranch[i] = sourceLbSigmoid - lambdaPrime * sourceLb; + } + + // update upper bound + if ( !FloatUtils::isPositive( sourceUb ) ) + { + symbolicUbPerBranch[i] = lambda; + symbolicUpperBiasPerBranch[i] = sourceUbSigmoid - lambda * sourceUb; + } + else + { + symbolicUbPerBranch[i] = lambdaPrime; + symbolicUpperBiasPerBranch[i] = sourceUbSigmoid - lambdaPrime * sourceUb; + } + } +} + +void NetworkLevelReasoner::calculateSymbolicBoundsPerBranchForLeakyRelu( + NeuronIndex index, + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const +{ + double slope = _layerIndexToLayer[index._layer]->getAlpha(); + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // Phase active + // Symbolic bound: x_b <= x_f <= x_b + symbolicUbPerBranch[i] = 1; + symbolicUpperBiasPerBranch[i] = 0; + symbolicLbPerBranch[i] = 1; + symbolicLowerBiasPerBranch[i] = 0; + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + // Phase inactive + // Symbolic bound: slope * x_b <= x_f <= slope * x_b + symbolicUbPerBranch[i] = slope; + symbolicUpperBiasPerBranch[i] = 0; + symbolicLbPerBranch[i] = slope; + symbolicLowerBiasPerBranch[i] = 0; + } + else + { + // LeakyReLU not fixed + // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) + double width = sourceUb - sourceLb; + double weight = ( sourceUb - slope * sourceLb ) / width; + + if ( slope <= 1 ) + { + symbolicUbPerBranch[i] = weight; + symbolicUpperBiasPerBranch[i] = ( ( slope - 1 ) * sourceUb * sourceLb ) / width; + + // For the lower bound, in general, x_f >= lambda * x_b, where + // 0 <= lambda <= 1, would be a sound lower bound. We + // use the heuristic described in section 4.1 of + // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + // to set the value of lambda (either 0 or 1 is considered). 
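Before the LeakyReLU case resumes below, here is a numeric check of the two slopes the Sigmoid branch bounds above rely on: lambda is the secant slope between the interval endpoints, lambdaPrime is the smaller of the two endpoint tangent slopes, and each bound is anchored at the corresponding endpoint. The snippet is standalone; sigmoid/sigmoidDerivative mirror SigmoidConstraint only in the obvious mathematical sense.

#include <algorithm>
#include <cmath>
#include <iostream>

double sigmoid( double x )
{
    return 1.0 / ( 1.0 + std::exp( -x ) );
}

double sigmoidDerivative( double x )
{
    double s = sigmoid( x );
    return s * ( 1.0 - s );
}

int main()
{
    double l = -1.0, u = 2.0;
    double sl = sigmoid( l ), su = sigmoid( u );

    // Secant slope between the endpoints, and the smaller endpoint tangent slope.
    double lambda = ( su - sl ) / ( u - l );
    double lambdaPrime = std::min( sigmoidDerivative( l ), sigmoidDerivative( u ) );

    // With l < 0 the lower bound uses the tangent slope, and with u > 0 so does the
    // upper bound, matching the case split in the code above.
    std::cout << lambda << " " << lambdaPrime << std::endl;  // ~0.204 ~0.105
}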
+ if ( sourceUb > sourceLb ) + { + // lambda = 1 + // Symbolic lower bound: x_f >= x_b + symbolicLbPerBranch[i] = 1; + symbolicLowerBiasPerBranch[i] = 0; + } + else + { + // lambda = 1 + // Symbolic lower bound: x_f >= 0 + // Concrete lower bound: x_f >= 0 + symbolicLbPerBranch[i] = slope; + symbolicLowerBiasPerBranch[i] = 0; + } + } + else + { + symbolicLbPerBranch[i] = weight; + symbolicLowerBiasPerBranch[i] = ( ( slope - 1 ) * sourceUb * sourceLb ) / width; + + if ( sourceUb > sourceLb ) + { + symbolicUbPerBranch[i] = 1; + symbolicUpperBiasPerBranch[i] = 0; + } + else + { + symbolicUbPerBranch[i] = slope; + symbolicLowerBiasPerBranch[i] = 0; + } + } + } +} + +void NetworkLevelReasoner::calculateSymbolicBoundsPerBranchForMax( + NeuronIndex index, + NeuronIndex chosenSourceIndex, + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const +{ + unsigned layerIndex = index._layer; + unsigned neuron = index._neuron; + Layer *layer = _layerIndexToLayer[layerIndex]; + List sources = layer->getActivationSources( neuron ); + + NeuronIndex indexOfMaxLowerBound = *( sources.begin() ); + double maxLowerBound = FloatUtils::negativeInfinity(); + double maxUpperBound = FloatUtils::negativeInfinity(); + + Map sourceLbs; + Map sourceUbs; + for ( const auto &sourceIndex : sources ) + { + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + unsigned sourceNeuron = sourceIndex._neuron; + double currentLb = + sourceIndex != chosenSourceIndex ? sourceLayer->getLb( sourceNeuron ) : sourceLb; + double currentUb = + sourceIndex != chosenSourceIndex ? sourceLayer->getUb( sourceNeuron ) : sourceUb; + sourceLbs[sourceIndex] = currentLb; + sourceUbs[sourceIndex] = currentUb; + + if ( maxLowerBound < currentLb ) + { + indexOfMaxLowerBound = sourceIndex; + maxLowerBound = currentLb; + } + if ( maxUpperBound < currentUb ) + { + maxUpperBound = currentUb; + } + } + + // The phase is fixed if the lower-bound of a source variable x_b is + // larger than the upper-bounds of the other source variables. + bool phaseFixed = true; + for ( const auto &sourceIndex : sources ) + { + if ( sourceIndex != indexOfMaxLowerBound && + FloatUtils::gt( sourceUbs[sourceIndex], maxLowerBound ) ) + { + phaseFixed = false; + break; + } + } + + if ( phaseFixed ) + { + // Phase fixed + // Symbolic bound: x_b <= x_f <= x_b + // Concretized bound (if chosenSourceIndex != indexOfMaxLowerBound): x_b.lb <= x_f <= + // x_b.ub. + if ( chosenSourceIndex != indexOfMaxLowerBound ) + { + symbolicLbPerBranch[i] = 0; + symbolicLowerBiasPerBranch[i] = sourceLbs[indexOfMaxLowerBound]; + symbolicUbPerBranch[i] = 0; + symbolicUpperBiasPerBranch[i] = sourceUbs[indexOfMaxLowerBound]; + } + else + { + symbolicLbPerBranch[i] = 1; + symbolicLowerBiasPerBranch[i] = 0; + symbolicUbPerBranch[i] = 1; + symbolicUpperBiasPerBranch[i] = 0; + } + } + else + { + // MaxPool not fixed + // Symbolic bounds: x_b <= x_f <= maxUpperBound + // Concretized bound (if chosenSourceIndex != indexOfMaxLowerBound): x_b.lb <= x_f <= + // maxUpperBound. 
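The Max handling above hinges on the phase-fixed test: the max is fixed to one source exactly when that source's lower bound dominates every other source's upper bound. A small standalone check of that test with illustrative bounds (the per-branch assignment continues right below):

#include <iostream>
#include <utility>
#include <vector>

// Return (phaseFixed, index of the source with the largest lower bound).
std::pair<bool, size_t> maxPhaseFixed( const std::vector<std::pair<double, double>> &bounds )
{
    size_t best = 0;
    for ( size_t i = 1; i < bounds.size(); ++i )
        if ( bounds[i].first > bounds[best].first )
            best = i;

    bool fixed = true;
    for ( size_t i = 0; i < bounds.size(); ++i )
        if ( i != best && bounds[i].second > bounds[best].first )
            fixed = false;

    return { fixed, best };
}

int main()
{
    // Source 1's lower bound (2) dominates the other upper bounds (1 and 1.5): phase fixed.
    std::vector<std::pair<double, double>> bounds = { { -1, 1 }, { 2, 4 }, { 0, 1.5 } };
    auto result = maxPhaseFixed( bounds );
    std::cout << result.first << " " << result.second << std::endl;  // 1 1
}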
+ if ( chosenSourceIndex != indexOfMaxLowerBound ) + { + symbolicLbPerBranch[i] = 0; + symbolicLowerBiasPerBranch[i] = sourceLbs[indexOfMaxLowerBound]; + symbolicUbPerBranch[i] = 0; + symbolicUpperBiasPerBranch[i] = maxUpperBound; + } + else + { + symbolicLbPerBranch[i] = 1; + symbolicLowerBiasPerBranch[i] = 0; + symbolicUbPerBranch[i] = 0; + symbolicUpperBiasPerBranch[i] = maxUpperBound; + } + } +} + +void NetworkLevelReasoner::calculateSymbolicBoundsPerBranchForSoftmax( + NeuronIndex index, + NeuronIndex chosenSourceIndex, + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const +{ + unsigned layerIndex = index._layer; + unsigned neuron = index._neuron; + Layer *layer = _layerIndexToLayer[layerIndex]; + List sources = layer->getActivationSources( neuron ); + Vector sourceLbs; + Vector sourceUbs; + Vector sourceMids; + Vector targetLbs; + Vector targetUbs; + for ( const auto &sourceIndex : sources ) + { + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + unsigned sourceNeuron = sourceIndex._neuron; + double currentLb = + sourceIndex != chosenSourceIndex ? sourceLayer->getLb( sourceNeuron ) : sourceLb; + double currentUb = + sourceIndex != chosenSourceIndex ? sourceLayer->getUb( sourceNeuron ) : sourceUb; + sourceLbs.append( currentLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceUbs.append( currentUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceMids.append( ( currentLb + currentUb ) / 2 ); + targetLbs.append( layer->getLb( neuron ) ); + targetUbs.append( layer->getUb( neuron ) ); + } + + unsigned selfIndex = 0; + Set handledInputNeurons; + for ( unsigned i = 0; i < neuron; ++i ) + { + for ( const auto &sourceIndex : layer->getActivationSources( i ) ) + { + if ( !handledInputNeurons.exists( sourceIndex._neuron ) ) + { + handledInputNeurons.insert( sourceIndex._neuron ); + break; + } + } + } + for ( const auto &sourceIndex : sources ) + { + if ( handledInputNeurons.exists( sourceIndex._neuron ) ) + ++selfIndex; + else + { + break; + } + } + + double lb = std::max( Layer::linearLowerBound( sourceLbs, sourceUbs, selfIndex ), + layer->getLb( neuron ) ); + double ub = std::min( Layer::linearUpperBound( sourceLbs, sourceUbs, selfIndex ), + layer->getUb( neuron ) ); + targetLbs[selfIndex] = lb; + targetUbs[selfIndex] = ub; + + if ( FloatUtils::areEqual( lb, ub ) ) + { + symbolicUbPerBranch[i] = 0; + symbolicUpperBiasPerBranch[i] = ub; + symbolicLbPerBranch[i] = 0; + symbolicLowerBiasPerBranch[i] = lb; + } + else + { + // Compute Softmax symbolic bound. Neurons other than given source neuron are concretized. + if ( Options::get()->getSoftmaxBoundType() == SoftmaxBoundType::LOG_SUM_EXP_DECOMPOSITION ) + { + bool useLSE2 = false; + for ( const auto &lb : targetLbs ) + { + if ( lb > GlobalConfiguration::SOFTMAX_LSE2_THRESHOLD ) + useLSE2 = true; + } + unsigned inputIndex = 0; + if ( !useLSE2 ) + { + symbolicLowerBiasPerBranch[i] = + Layer::LSELowerBound( sourceMids, sourceLbs, sourceUbs, selfIndex ); + for ( const auto &sourceIndex : sources ) + { + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + double dldj = Layer::dLSELowerBound( + sourceMids, sourceLbs, sourceUbs, selfIndex, inputIndex ); + if ( sourceIndex != chosenSourceIndex ) + { + double concretizedLowerBias = + dldj > 0 ? 
dldj * sourceLayer->getLb( sourceIndex._neuron ) + : dldj * sourceLayer->getUb( sourceIndex._neuron ); + symbolicLowerBiasPerBranch[i] += concretizedLowerBias; + } + else + { + symbolicLbPerBranch[i] = dldj; + } + symbolicLowerBiasPerBranch[i] -= dldj * sourceMids[inputIndex]; + ++inputIndex; + } + } + else + { + symbolicLowerBiasPerBranch[i] = + Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, selfIndex ); + for ( const auto &sourceIndex : sources ) + { + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + double dldj = Layer::dLSELowerBound2( + sourceMids, sourceLbs, sourceUbs, selfIndex, inputIndex ); + if ( sourceIndex != chosenSourceIndex ) + { + double concretizedLowerBias = + dldj > 0 ? dldj * sourceLayer->getLb( sourceIndex._neuron ) + : dldj * sourceLayer->getUb( sourceIndex._neuron ); + symbolicLowerBiasPerBranch[i] += concretizedLowerBias; + } + else + { + symbolicLbPerBranch[i] = dldj; + } + symbolicLowerBiasPerBranch[i] -= dldj * sourceMids[inputIndex]; + ++inputIndex; + } + } + + symbolicUpperBiasPerBranch[i] = + Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, selfIndex ); + inputIndex = 0; + for ( const auto &sourceIndex : sources ) + { + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + double dudj = Layer::dLSEUpperbound( + sourceMids, targetLbs, targetUbs, selfIndex, inputIndex ); + if ( sourceIndex != chosenSourceIndex ) + { + double concretizedUpperBias = + dudj > 0 ? dudj * sourceLayer->getUb( sourceIndex._neuron ) + : dudj * sourceLayer->getLb( sourceIndex._neuron ); + symbolicUpperBiasPerBranch[i] += concretizedUpperBias; + } + else + { + symbolicUbPerBranch[i] = dudj; + } + symbolicUpperBiasPerBranch[i] -= dudj * sourceMids[inputIndex]; + ++inputIndex; + } + } + else if ( Options::get()->getSoftmaxBoundType() == + SoftmaxBoundType::EXPONENTIAL_RECIPROCAL_DECOMPOSITION ) + { + symbolicLowerBiasPerBranch[i] = + Layer::ERLowerBound( sourceMids, sourceLbs, sourceUbs, selfIndex ); + unsigned inputIndex = 0; + for ( const auto &sourceIndex : sources ) + { + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + double dldj = + Layer::dERLowerBound( sourceMids, sourceLbs, sourceUbs, selfIndex, inputIndex ); + if ( sourceIndex != chosenSourceIndex ) + { + double concretizedLowerBias = + dldj > 0 ? dldj * sourceLayer->getLb( sourceIndex._neuron ) + : dldj * sourceLayer->getUb( sourceIndex._neuron ); + symbolicLowerBiasPerBranch[i] += concretizedLowerBias; + } + else + { + symbolicLbPerBranch[i] = dldj; + } + symbolicLowerBiasPerBranch[i] -= dldj * sourceMids[inputIndex]; + ++inputIndex; + } + + symbolicUpperBiasPerBranch[i] = + Layer::ERUpperBound( sourceMids, targetLbs, targetUbs, selfIndex ); + inputIndex = 0; + for ( const auto &sourceIndex : sources ) + { + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + double dudj = + Layer::dERUpperBound( sourceMids, targetLbs, targetUbs, selfIndex, inputIndex ); + if ( sourceIndex != chosenSourceIndex ) + { + double concretizedUpperBias = + dudj > 0 ? 
dudj * sourceLayer->getUb( sourceIndex._neuron ) + : dudj * sourceLayer->getLb( sourceIndex._neuron ); + symbolicUpperBiasPerBranch[i] += concretizedUpperBias; + } + else + { + symbolicUbPerBranch[i] = dudj; + } + symbolicUpperBiasPerBranch[i] -= dudj * sourceMids[inputIndex]; + ++inputIndex; + } + } + } +} +void NetworkLevelReasoner::calculateSymbolicBoundsPerBranchForBilinear( + NeuronIndex index, + NeuronIndex chosenSourceIndex, + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const +{ + unsigned layerIndex = index._layer; + unsigned neuron = index._neuron; + Layer *layer = _layerIndexToLayer[layerIndex]; + List sources = layer->getActivationSources( neuron ); + + Vector sourceLbs; + Vector sourceUbs; + Vector sourceValues; + Vector sourceNeurons; + Vector sourceLayerSizes; + Vector sourceLayers; + bool allConstant = true; + for ( const auto &sourceIndex : sources ) + { + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + unsigned sourceNeuron = sourceIndex._neuron; + double currentLb = + sourceIndex != chosenSourceIndex ? sourceLayer->getLb( sourceNeuron ) : sourceLb; + double currentUb = + sourceIndex != chosenSourceIndex ? sourceLayer->getUb( sourceNeuron ) : sourceUb; + + sourceLayers.append( sourceLayer ); + sourceNeurons.append( sourceIndex ); + sourceLbs.append( currentLb ); + sourceUbs.append( currentUb ); + + if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) + { + allConstant = false; + } + else + { + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + sourceValues.append( sourceValue ); + } + } + + if ( allConstant ) + { + // If the both source neurons have been eliminated, this neuron is constant + symbolicUbPerBranch[i] = 0; + symbolicLbPerBranch[i] = 0; + symbolicUpperBiasPerBranch[i] = sourceValues[0] * sourceValues[1]; + symbolicLowerBiasPerBranch[i] = sourceValues[0] * sourceValues[1]; + } + else + { + // Symbolic lower bound: + // out >= alpha * x + beta * y + gamma + // where alpha = lb_y, beta = lb_x, gamma = -lb_x * lb_y. + + // Symbolic upper bound: + // out <= alpha * x + beta * y + gamma + // where alpha = ub_y, beta = lb_x, gamma = -lb_x * ub_y. + + // Neuron other than given source neuron is concretized. + + double aLower = sourceLbs[1]; + double aUpper = sourceUbs[1]; + double bLower = sourceLbs[0]; + double bUpper = sourceLbs[0]; + if ( sourceNeurons[0] != chosenSourceIndex ) + { + double concretizedLowerBias = + aLower > 0 ? aLower * sourceLayers[0]->getLb( sourceNeurons[0]._neuron ) + : aLower * sourceLayers[0]->getUb( sourceNeurons[0]._neuron ); + double concretizedUpperBias = + aUpper > 0 ? aUpper * sourceLayers[0]->getUb( sourceNeurons[0]._neuron ) + : aUpper * sourceLayers[0]->getLb( sourceNeurons[0]._neuron ); + symbolicLbPerBranch[i] = bLower; + symbolicUbPerBranch[i] = bUpper; + symbolicLowerBiasPerBranch[i] += concretizedLowerBias; + symbolicUpperBiasPerBranch[i] += concretizedUpperBias; + } + else + { + double concretizedLowerBias = + bLower > 0 ? bLower * sourceLayers[1]->getLb( sourceNeurons[1]._neuron ) + : bLower * sourceLayers[1]->getUb( sourceNeurons[1]._neuron ); + double concretizedUpperBias = + bUpper > 0 ? 
bUpper * sourceLayers[1]->getUb( sourceNeurons[1]._neuron ) + : bUpper * sourceLayers[1]->getLb( sourceNeurons[1]._neuron ); + symbolicLbPerBranch[i] = aLower; + symbolicUbPerBranch[i] = aUpper; + symbolicUpperBiasPerBranch[i] += concretizedUpperBias; + symbolicLowerBiasPerBranch[i] += concretizedLowerBias; + } + symbolicLowerBiasPerBranch[i] += -sourceLbs[0] * sourceLbs[1]; + symbolicUpperBiasPerBranch[i] += -sourceLbs[0] * sourceUbs[1]; + } +} + +double +NetworkLevelReasoner::calculateTighteningLoss( const Vector &values, + const Vector &symbolicLbPerBranch, + const Vector &symbolicUbPerBranch, + const Vector &symbolicLowerBiasPerBranch, + const Vector &symbolicUpperBiasPerBranch, + unsigned branchCount ) const +{ + ASSERT( symbolicLbPerBranch.size() == branchCount ); + ASSERT( symbolicUbPerBranch.size() == branchCount ); + ASSERT( symbolicLowerBiasPerBranch.size() == branchCount ); + ASSERT( symbolicUpperBiasPerBranch.size() == branchCount ); + ASSERT( values.size() == branchCount + 1 ); + + double score = 0; + for ( unsigned i = 0; i < branchCount; ++i ) + { + // Given branch #i symbolic bounds of x_f >= a_l x_b + b_l, x_f <= a_u x_b + b_u, x_b in + // [l_i, u_i], calculate integral of ( a_u x_b + b_u ) - ( a_l x_b + b_l ) in [l_i, u_i]. + score += symbolicUbPerBranch[i] * + ( std::pow( values[i + 1], 2 ) - std::pow( values[i], 2 ) ) / 2; + score += symbolicUpperBiasPerBranch[i] * ( values[i + 1] - values[i] ); + score -= symbolicLbPerBranch[i] * + ( std::pow( values[i + 1], 2 ) - std::pow( values[i], 2 ) ) / 2; + score -= symbolicLowerBiasPerBranch[i] * ( values[i + 1] - values[i] ); + } + return score; +} + +const Map> +NetworkLevelReasoner::getParametersForLayers( const Vector &coeffs ) const +{ + ASSERT( coeffs.size() == getNumberOfParameters() ); + unsigned index = 0; + Map> layerIndicesToParameters; + for ( const auto &pair : _layerIndexToLayer ) + { + unsigned layerIndex = pair.first; + Layer *layer = pair.second; + unsigned coeffsCount = getNumberOfParametersPerType( layer->getLayerType() ); + Vector currentCoeffs( coeffsCount ); + for ( unsigned i = 0; i < coeffsCount; ++i ) + { + currentCoeffs[i] = coeffs[index + i]; + } + layerIndicesToParameters.insert( layerIndex, currentCoeffs ); + index += coeffsCount; + } + const Map> parametersForLayers( layerIndicesToParameters ); + return parametersForLayers; +} + +unsigned NetworkLevelReasoner::getNumberOfParameters() const +{ + unsigned num = 0; + for ( const auto &pair : _layerIndexToLayer ) + { + Layer *layer = pair.second; + num += getNumberOfParametersPerType( layer->getLayerType() ); + } + return num; +} + +unsigned NetworkLevelReasoner::getNumberOfParametersPerType( Layer::Type t ) const +{ + if ( t == Layer::RELU || t == Layer::LEAKY_RELU ) + return 1; + + if ( t == Layer::SIGN || t == Layer::BILINEAR ) + return 2; + + return 0; +} + +bool NetworkLevelReasoner::supportsInvpropBranching( Layer::Type type ) const +{ + // When using BBPS heuristic, all implemented activations could be branched before INVPROP. 
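calculateTighteningLoss above is the area between the per-branch upper and lower relaxation lines, summed over branches, using the closed-form integral of an affine function. The standalone sketch below (illustrative names only) evaluates it for an unfixed ReLU over [-1, 3]: the single-branch triangle relaxation has area 2, while splitting at 0 makes both branches exact and drives the loss to 0, which is precisely the behaviour the branching-point heuristic rewards. (The branching-support check continues right below.)

#include <iostream>
#include <vector>

// Area between upper bound a_u * x + b_u and lower bound a_l * x + b_l over each branch
// [values[i], values[i + 1]], mirroring the closed-form integral used above.
double tighteningLoss( const std::vector<double> &values,
                       const std::vector<double> &aL, const std::vector<double> &bL,
                       const std::vector<double> &aU, const std::vector<double> &bU )
{
    double score = 0;
    for ( size_t i = 0; i + 1 < values.size(); ++i )
    {
        double sq = ( values[i + 1] * values[i + 1] - values[i] * values[i] ) / 2;
        double len = values[i + 1] - values[i];
        score += aU[i] * sq + bU[i] * len - aL[i] * sq - bL[i] * len;
    }
    return score;
}

int main()
{
    // Unfixed ReLU over [-1, 3]: single-branch triangle relaxation x <= f <= 0.75 x + 0.75.
    std::cout << tighteningLoss( { -1, 3 }, { 1 }, { 0 }, { 0.75 }, { 0.75 } ) << std::endl;  // 2

    // Split at 0: inactive branch 0 <= f <= 0, active branch x <= f <= x. Loss vanishes.
    std::cout << tighteningLoss( { -1, 0, 3 }, { 0, 1 }, { 0, 0 }, { 0, 1 }, { 0, 0 } )
              << std::endl;  // 0
}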
+ if ( Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR ) + { + return type == Layer::RELU || type == Layer::LEAKY_RELU || type == Layer::SIGN || + type == Layer::ABSOLUTE_VALUE || type == Layer::MAX || type == Layer::ROUND || + type == Layer::SIGMOID || type == Layer::SOFTMAX || type == Layer::BILINEAR; + } + return false; +} + +const Vector NetworkLevelReasoner::getLayersWithNonfixedNeurons() const +{ + Vector layerWithNonfixedNeurons = Vector( {} ); + for ( const auto &pair : _layerIndexToLayer ) + { + if ( !pair.second->getNonfixedNeurons().empty() ) + { + layerWithNonfixedNeurons.append( pair.first ); + } + } + const Vector layerList = Vector( layerWithNonfixedNeurons ); + return layerList; +} void NetworkLevelReasoner::mergeWSLayers( unsigned secondLayerIndex, Map &eliminatedNeurons ) diff --git a/src/nlr/NetworkLevelReasoner.h b/src/nlr/NetworkLevelReasoner.h index 2660795be6..e9dd4aa106 100644 --- a/src/nlr/NetworkLevelReasoner.h +++ b/src/nlr/NetworkLevelReasoner.h @@ -2,7 +2,7 @@ /*! \file NetworkLevelReasoner.h ** \verbatim ** Top contributors (to current version): - ** Guy Katz + ** Guy Katz, Ido Shmuel ** This file is part of the Marabou project. ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS ** in the top-level source directory) and their institutional affiliations. @@ -11,7 +11,7 @@ ** ** [[ Add lengthier description here ]] -**/ + **/ #ifndef __NetworkLevelReasoner_h__ #define __NetworkLevelReasoner_h__ @@ -24,6 +24,7 @@ #include "MatrixMultiplication.h" #include "NeuronIndex.h" #include "PiecewiseLinearFunctionType.h" +#include "PolygonalTightening.h" #include "Tightening.h" #include "Vector.h" @@ -49,6 +50,7 @@ class NetworkLevelReasoner : public LayerOwner */ void addLayer( unsigned layerIndex, Layer::Type type, unsigned layerSize ); void addLayerDependency( unsigned sourceLayer, unsigned targetLayer ); + void removeLayerDependency( unsigned sourceLayer, unsigned targetLayer ); void computeSuccessorLayers(); void setWeight( unsigned sourceLayer, unsigned sourceNeuron, @@ -103,6 +105,24 @@ class NetworkLevelReasoner : public LayerOwner bound on the upper bound of a ReLU node is negative, that ReLU is inactive and its output can be set to 0. + - Parametrised Symbolic: For certain activation functions, there + is a continuum of valid symbolic bounds. We receive a map of + coefficients in range [0, 1] for every layer index, then compute + the parameterised symbolic bounds (or default to regular + symbolic bounds if parameterised bounds not implemented). + + - DeepPoly: For every neuron in the network, calculate symbolic + bounds in term of its predecessor neurons, then perform + backsubstitution up to the input layer and concretize. + + - Parameterised DeepPoly: For certain activation functions, there + is a continuum of valid symbolic bounds. We receive a map of + coefficients in range [0, 1] for every layer index, then compute + the DeepPoly bounds via backsubstitution and concretization. For + each layer, a symbolic bound in terms its highest predecessor layers + is stored in the predessorSymbolic maps. Symbolic bounds for the last + layer in terms of every layer are stored in the outputLayer maps. 
+ - LP Relaxation: invoking an LP solver on a series of LP relaxations of the problem we're trying to solve, and optimizing the lower and upper bounds of each of the @@ -114,6 +134,20 @@ class NetworkLevelReasoner : public LayerOwner - getConstraintTightenings: this is the function that an external user calls in order to collect the tighter bounds discovered by the NLR. + + - receivePolygonalTightening: this is a callback from the layer + objects, through which they report polygonal bounds. + + - getPolygonalTightenings: this is the function that an + external user calls in order to collect the polygonal bounds + discovered by the NLR. + + - receiveInfeasibleBranches: this is a callback from the layer + objects, through which they report infeasible branches combinations. + + - getPolygonalTightenings: this is the function that an + external user calls in order to collect the infeasible branches combinations + discovered by the NLR. */ void setTableau( const ITableau *tableau ); @@ -123,7 +157,10 @@ class NetworkLevelReasoner : public LayerOwner void obtainCurrentBounds(); void intervalArithmeticBoundPropagation(); void symbolicBoundPropagation(); + void parameterisedSymbolicBoundPropagation( const Vector &coeffs ); void deepPolyPropagation(); + void parameterisedDeepPoly( bool storeSymbolicBounds = false, + const Vector &coeffs = Vector( {} ) ); void lpRelaxationPropagation(); void LPTighteningForOneLayer( unsigned targetIndex ); void MILPPropagation(); @@ -134,6 +171,17 @@ class NetworkLevelReasoner : public LayerOwner void getConstraintTightenings( List &tightenings ); void clearConstraintTightenings(); + void receivePolygonalTightening( PolygonalTightening &polygonalTightening ); + void getPolygonalTightenings( List &polygonalTightenings ); + void clearPolygonalTightenings(); + + void receiveInfeasibleBranches( Map &neuronToBranchIndex ); + void getInfeasibleBranches( List> &infeasibleBranches ); + void clearInfeasibleBranches(); + + // Get total number of optimizable parameters for parameterised SBT relaxation. + unsigned getNumberOfParameters() const; + /* For debugging purposes: dump the network topology */ @@ -178,6 +226,35 @@ class NetworkLevelReasoner : public LayerOwner */ double getPreviousBias( const ReluConstraint *reluConstraint ) const; + // Get symbolic bounds for the last layer in term of given layer. + Vector getOutputSymbolicLb( unsigned layerIndex ); + Vector getOutputSymbolicUb( unsigned layerIndex ); + Vector getOutputSymbolicLowerBias( unsigned layerIndex ); + Vector getOutputSymbolicUpperBias( unsigned layerIndex ); + + // Get symbolic bounds of given layer in terms of its predecessor. + Vector getPredecessorSymbolicLb( unsigned layerIndex ); + Vector getPredecessorSymbolicUb( unsigned layerIndex ); + Vector getPredecessorSymbolicLowerBias( unsigned layerIndex ); + Vector getPredecessorSymbolicUpperBias( unsigned layerIndex ); + + /* + Get the symbolic bounds in term of predecessor layer given branch of given neuron. + */ + Vector getSymbolicLbPerBranch( NeuronIndex index ); + Vector getSymbolicUbPerBranch( NeuronIndex index ); + Vector getSymbolicLowerBiasPerBranch( NeuronIndex index ); + Vector getSymbolicUpperBiasPerBranch( NeuronIndex index ); + + /* + Get the BBPS branching point of given neuron: Map containing the + branching point for every predecessor neuron. + */ + std::pair getBBPSBranchingPoint( NeuronIndex index ); + + // Get PMNR neuron selection heuristic score for given neuron. 
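The parameterised symbolic mode documented above replaces the fixed DeepPoly choice of lambda in {0, 1} with a continuous coefficient in [0, 1], which an outer optimization loop can then tune. The following is only a sketch of what such a parameterised ReLU relaxation looks like, under the assumption that the coefficient scales the lower-bound slope; names and the struct are illustrative, not Marabou's API (the accessor declarations continue right below).

#include <iostream>

// Parameterised ReLU relaxation over [l, u] with l < 0 < u: the upper bound is the fixed
// triangle line, and the lower bound x_f >= coeff * x_b is sound for any coeff in [0, 1].
struct ReluRelaxation
{
    double lowerSlope, lowerBias, upperSlope, upperBias;
};

ReluRelaxation parameterisedRelu( double l, double u, double coeff )
{
    ReluRelaxation r;
    r.upperSlope = u / ( u - l );
    r.upperBias = -l * r.upperSlope;
    r.lowerSlope = coeff;  // coeff = 1 and coeff = 0 recover the two DeepPoly choices
    r.lowerBias = 0;
    return r;
}

int main()
{
    ReluRelaxation r = parameterisedRelu( -1.0, 3.0, 0.4 );
    std::cout << r.lowerSlope << " " << r.upperSlope << " " << r.upperBias << std::endl;  // 0.4 0.75 0.75
}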
+ double getPMNRScore( NeuronIndex index ); + /* Finds logically consecutive WS layers and merges them, in order to reduce the total number of layers and variables in the @@ -204,16 +281,36 @@ class NetworkLevelReasoner : public LayerOwner Map _layerIndexToLayer; const ITableau *_tableau; - // Tightenings discovered by the various layers + // Tightenings and Polyognal Tightenings discovered by the various layers List _boundTightenings; + List _polygonalBoundTightenings; + List> _infeasibleBranches; std::unique_ptr _deepPolyAnalysis; - void freeMemoryIfNeeded(); - List _constraintsInTopologicalOrder; + Map> _predecessorSymbolicLb; + Map> _predecessorSymbolicUb; + Map> _predecessorSymbolicLowerBias; + Map> _predecessorSymbolicUpperBias; + + Map> _outputSymbolicLb; + Map> _outputSymbolicUb; + Map> _outputSymbolicLowerBias; + Map> _outputSymbolicUpperBias; + + Map _neuronToPMNRScores; + + Map> _neuronToBBPSBranchingPoints; + Map> _neuronToSymbolicLbPerBranch; + Map> _neuronToSymbolicUbPerBranch; + Map> _neuronToSymbolicLowerBiasPerBranch; + Map> _neuronToSymbolicUpperBiasPerBranch; + + void freeMemoryIfNeeded(); + // Map each neuron to a linear expression representing its weighted sum void generateLinearExpressionForWeightedSumLayer( Map &variableToExpression, @@ -242,6 +339,48 @@ class NetworkLevelReasoner : public LayerOwner unsigned outputDimension ); void reduceLayerIndex( unsigned layer, unsigned startIndex ); + // Return optimizable parameters which minimize parameterised SBT bounds' volume. + const Vector OptimalParameterisedSymbolicBoundTightening(); + + // Optimize biases of generated parameterised polygonal tightenings. + const Vector OptimizeParameterisedPolygonalTightening(); + + // Estimate Volume of parameterised symbolic bound tightening. + double EstimateVolume( const Vector &coeffs ); + + // Return difference between given point and upper and lower bounds determined by parameterised + // SBT relaxation. + double calculateDifferenceFromSymbolic( const Layer *layer, + Map &point, + unsigned i ) const; + + // Heuristically generating optimizable polygonal tightening for PMNR. + const Vector generatePolygonalTighteningsForPMNR(); + + // Heuristically select neurons for PMNR. + const Vector selectPMNRNeurons(); + + // Optimize biases of generated parameterised polygonal tightenings. + double OptimizeSingleParameterisedPolygonalTightening( + PolygonalTightening &tightening, + Vector &prevTightenings, + bool maximize, + double feasibilityBound, + const Map &neuronToBranchIndex = Map( {} ) ); + + double OptimizeSingleParameterisedPolygonalTighteningWithBranching( + PolygonalTightening &tightening, + Vector &prevTightenings, + bool maximize, + double bound ); + + // Get current lower bound for selected parameterised polygonal tightenings' biases. + double getParameterisdPolygonalTighteningBound( + const Vector &gamma, + PolygonalTightening &tightening, + Vector &prevTightenings, + const Map &neuronToBranchIndex = Map( {} ) ); + /* Store previous biases for each ReLU neuron in a map for getPreviousBias() and BaBSR heuristic @@ -249,6 +388,146 @@ class NetworkLevelReasoner : public LayerOwner Map _previousBiases; void initializePreviousBiasMap(); + // Calculate PMNRScore for every non-fixed neurons. + void initializePMNRScoreMap(); + double calculatePMNRBBPSScore( NeuronIndex index ); + + // Initialize PMNR-BBPS branching point scores and per-branch predecessor symbolic bounds for + // every non-fixed neuron. 
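EstimateVolume, declared above, is what makes the coefficients comparable: a tighter parameterised relaxation is one whose over-approximation encloses less volume, and that volume can be estimated by sampling. The snippet below is only a rough standalone Monte Carlo sketch of the idea, using a one-dimensional bound gap as the integrand; the function name, signature, and integrand are assumptions for illustration, not the actual procedure (the BBPS branching-map declarations continue right below).

#include <iostream>
#include <random>

// Estimate the average gap between an upper and a lower bound function over [l, u]
// by uniform sampling; multiplying by (u - l) gives an area / volume estimate.
template <typename F, typename G>
double estimateVolume( F lower, G upper, double l, double u, unsigned iterations, unsigned seed )
{
    std::mt19937 rng( seed );
    std::uniform_real_distribution<double> dist( l, u );
    double total = 0;
    for ( unsigned i = 0; i < iterations; ++i )
    {
        double x = dist( rng );
        total += upper( x ) - lower( x );
    }
    return ( u - l ) * total / iterations;
}

int main()
{
    // Triangle-relaxation gap of an unfixed ReLU over [-1, 3]; the exact area is 2
    // (see the tightening-loss example earlier), so the estimate should land nearby.
    auto lower = []( double x ) { return x; };
    auto upper = []( double x ) { return 0.75 * x + 0.75; };
    std::cout << estimateVolume( lower, upper, -1.0, 3.0, 25000, 1 ) << std::endl;  // ~2
}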
+ void initializeBBPSBranchingMaps(); + const std::pair calculateBranchingPoint( NeuronIndex index ) const; + + // Heuristically generate candidates for branching points. + const Vector> + generateBranchingPointCandidates( const Layer *layer, unsigned i ) const; + + // Helper functions for generating branching points. + const Vector> + generateBranchingPointCandidatesAtZero( const Layer *layer, unsigned i ) const; + const Vector> + generateBranchingPointCandidatesForRound( const Layer *layer, unsigned i ) const; + const Vector> + generateBranchingPointCandidatesForSigmoid( const Layer *layer, unsigned i ) const; + const Vector> + generateBranchingPointCandidatesForMax( const Layer *layer, unsigned i ) const; + const Vector> + generateBranchingPointCandidatesForSoftmax( const Layer *layer, unsigned i ) const; + const Vector> + generateBranchingPointCandidatesForBilinear( const Layer *layer, unsigned i ) const; + + // Given neuron index, source index and branch ranges, compute symbolic bounds per branch. + // If activation has multiple sources, sources other than given neuron are concretized. + void calculateSymbolicBoundsPerBranch( NeuronIndex index, + NeuronIndex sourceIndex, + const Vector &values, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch, + unsigned branchCount ) const; + + // Helper functions for calculating branch symbolic bounds. + void + calculateSymbolicBoundsPerBranchForRelu( unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const; + void calculateSymbolicBoundsPerBranchForAbsoluteValue( + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const; + void + calculateSymbolicBoundsPerBranchForSign( unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const; + void + calculateSymbolicBoundsPerBranchForRound( unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const; + void + calculateSymbolicBoundsPerBranchForSigmoid( unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const; + void calculateSymbolicBoundsPerBranchForLeakyRelu( + NeuronIndex index, + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const; + void calculateSymbolicBoundsPerBranchForMax( NeuronIndex index, + NeuronIndex chosenSourceIndex, + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const; + void + calculateSymbolicBoundsPerBranchForSoftmax( NeuronIndex index, + NeuronIndex chosenSourceIndex, + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector 
&symbolicUpperBiasPerBranch ) const; + void + calculateSymbolicBoundsPerBranchForBilinear( NeuronIndex index, + NeuronIndex chosenSourceIndex, + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const; + + // Calculate tightening loss of branch symbolic bounds. + double calculateTighteningLoss( const Vector &values, + const Vector &symbolicLbPerBranch, + const Vector &symbolicUbPerBranch, + const Vector &symbolicLowerBiasPerBranch, + const Vector &symbolicUpperBiasPerBranch, + unsigned branchCount ) const; + + // Get map containing vector of optimizable parameters for parameterised SBT relaxation for + // every layer index. + const Map> + getParametersForLayers( const Vector &coeffs ) const; + + // Get number of optimizable parameters for parameterised SBT relaxation per layer type. + unsigned getNumberOfParametersPerType( Layer::Type t ) const; + + // Determine whether activation type and PMNR strategy support branching before INVPROP. + bool supportsInvpropBranching( Layer::Type type ) const; + + // Get all indices of layers with non-fixed neurona. + const Vector getLayersWithNonfixedNeurons() const; + /* If the NLR is manipulated manually in order to generate a new input query, this method can be used to assign variable indices diff --git a/src/nlr/tests/Test_DeepPolyAnalysis.h b/src/nlr/tests/Test_DeepPolyAnalysis.h index 14f39a3d40..150bccb9c0 100644 --- a/src/nlr/tests/Test_DeepPolyAnalysis.h +++ b/src/nlr/tests/Test_DeepPolyAnalysis.h @@ -14,7 +14,6 @@ **/ #include "../../engine/tests/MockTableau.h" -#include "DeepPolySoftmaxElement.h" #include "FloatUtils.h" #include "InputQuery.h" #include "Layer.h" @@ -824,6 +823,8 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite // Mark the Sigmoid sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); + + // Mark the Round sources nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); @@ -952,7 +953,6 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite { /* - x0 x3 S x6 x1 x4 S x7 @@ -1001,6 +1001,7 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite nlr.setBias( 1, 1, 2 ); nlr.setBias( 1, 2, 3 ); + // Mark the Softmax sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 0 ); nlr.addActivationSource( 1, 2, 2, 0 ); @@ -1011,7 +1012,6 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 1, 1, 2, 2 ); nlr.addActivationSource( 1, 2, 2, 2 ); - // Variable indexing nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); @@ -1050,7 +1050,6 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 10, large ); } - void test_deeppoly_softmax1() { NLR::NetworkLevelReasoner nlr; @@ -1193,12 +1192,10 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite return false; } - void populateNetworkSoftmax2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* - x0 x3 S x8 x1 x4 S x9 @@ -1269,6 +1266,7 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite nlr.setBias( 1, 3, 2 ); nlr.setBias( 1, 4, 1 ); + // Mark the Softmax sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 2, 2, 0 ); nlr.addActivationSource( 1, 4, 2, 0 ); @@ -1392,22 +1390,21 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite Vector inputUb = { 0, 2, 4 }; Vector input = { 
-0.5, 1, 2.5 }; - double value = NLR::DeepPolySoftmaxElement::ERLowerBound( input, inputLb, inputUb, 0 ); + double value = NLR::Layer::ERLowerBound( input, inputLb, inputUb, 0 ); TS_ASSERT( FloatUtils::areEqual( value, 0.0114799, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 0 ); + value = NLR::Layer::dERLowerBound( input, inputLb, inputUb, 0, 0 ); TS_ASSERT( FloatUtils::areEqual( value, 0.00563867, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 1 ); + value = NLR::Layer::dERLowerBound( input, inputLb, inputUb, 0, 1 ); TS_ASSERT( FloatUtils::areEqual( value, -0.000838421, 0.00001 ) ); - Vector outputLb = { 0.2, 0, 0 }; Vector outputUb = { 0.4, 0.1, 0.1 }; - value = NLR::DeepPolySoftmaxElement::ERUpperBound( input, outputLb, outputUb, 0 ); + value = NLR::Layer::ERUpperBound( input, outputLb, outputUb, 0 ); TS_ASSERT( FloatUtils::areEqual( value, -1.44538, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 0 ); + value = NLR::Layer::dERUpperBound( input, outputLb, outputUb, 0, 0 ); TS_ASSERT( FloatUtils::areEqual( value, 1.96538, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 1 ); + value = NLR::Layer::dERUpperBound( input, outputLb, outputUb, 0, 1 ); TS_ASSERT( FloatUtils::areEqual( value, -0.358535, 0.00001 ) ); } @@ -1416,20 +1413,22 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite Vector inputLb = { -1, 0, 1 }; Vector inputUb = { 0, 2, 3 }; Vector input = { -0.5, 1, 2 }; - double value = NLR::DeepPolySoftmaxElement::LSELowerBound( input, inputLb, inputUb, 0 ); + + double value = NLR::Layer::LSELowerBound( input, inputLb, inputUb, 0 ); TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 0 ); + value = NLR::Layer::dLSELowerBound( input, inputLb, inputUb, 0, 0 ); TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 1 ); + value = NLR::Layer::dLSELowerBound( input, inputLb, inputUb, 0, 1 ); TS_ASSERT( FloatUtils::areEqual( value, -0.00703444, 0.001 ) ); Vector outputLb = { 0.2, 0, 0 }; Vector outputUb = { 0.4, 0.1, 0.1 }; - value = NLR::DeepPolySoftmaxElement::LSEUpperBound( input, outputLb, outputUb, 0 ); + + value = NLR::Layer::LSEUpperBound( input, outputLb, outputUb, 0 ); TS_ASSERT( FloatUtils::areEqual( value, -0.164165, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 0 ); + value = NLR::Layer::dLSEUpperbound( input, outputLb, outputUb, 0, 0 ); TS_ASSERT( FloatUtils::areEqual( value, 0.272204, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 1 ); + value = NLR::Layer::dLSEUpperbound( input, outputLb, outputUb, 0, 1 ); TS_ASSERT( FloatUtils::areEqual( value, -0.073207, 0.00001 ) ); } @@ -1437,7 +1436,6 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite { /* - x0 x2 x x4 -- x5 x1 x3 @@ -1466,10 +1464,10 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite nlr.setWeight( 0, 1, 1, 1, 1 ); nlr.setWeight( 2, 0, 3, 0, -1 ); + // Mark the Bilinear sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 0 ); - // Variable indexing nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); @@ -1589,7 +1587,7 @@ class 
DeepPolyAnalysisTestSuite : public CxxTest::TestSuite nlr.setBias( 5, 0, 1 ); - // Mark the ReLU sources + // Mark the LeakyReLU sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); @@ -1793,16 +1791,5802 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite } } - bool existsBounds( const List &bounds, Tightening bound ) + void populateNetworkDeepPolyRelu( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - for ( const auto &b : bounds ) - { - if ( b._type == bound._type && b._variable == bound._variable ) - { - if ( FloatUtils::areEqual( b._value, bound._value ) ) - return true; - } - } - return false; + /* + 2 R 1 + x0 --- x2 ---> x4 --- x6 + \ / / + 1 \ / / + \/ -1 / + /\ / + 3 / \ / + / \ R / + x1 --- x3 ---> x5 + 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 7 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + } + + void populateNetworkDeepPolyReluResidual1( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + -1 + __________________ + / \ + / 1 R -1 1 R 3 1 + x0 --- x1 ---> x2 --- x3 ---> x4 --- x5 + \ / + \ 3 / + \________________________/ + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 1 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 2, NLR::Layer::RELU, 1 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 4, NLR::Layer::RELU, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 1, 5 ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 4, 0, 5, 0, 3 ); + nlr.setWeight( 0, 0, 3, 0, -1 ); + nlr.setWeight( 1, 0, 5, 0, 3 ); + + nlr.setBias( 3, 0, 1 ); + nlr.setBias( 5, 0, 1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 3, 0, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( 
NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 5 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 6 ); + tableau.setLowerBound( 1, -large ); + tableau.setUpperBound( 1, large ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + } + + void populateNetworkDeepPolyReluResidual2( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + -1 + __________________ + / \ + / 1 R -1 1 R 3 1 1 + x0 --- x1 ---> x2 --- x3 ---> x4 --- x5 --- x6 + \ / + \ 1 / + \_______________________________/ + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 1 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 2, NLR::Layer::RELU, 1 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 4, NLR::Layer::RELU, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 6, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 6; ++i ) + nlr.addLayerDependency( i - 1, i ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 0, 5 ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 4, 0, 5, 0, 3 ); + nlr.setWeight( 0, 0, 3, 0, -1 ); + nlr.setWeight( 0, 0, 5, 0, 1 ); + nlr.setWeight( 5, 0, 6, 0, 1 ); + + nlr.setBias( 3, 0, 1 ); + nlr.setBias( 5, 0, 1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 3, 0, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 7 ); + tableau.setLowerBound( 1, -large ); + tableau.setUpperBound( 1, large ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + } + + void populateNetworkDeepPolyReluReindex( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 1 1 1 + x0 --- x2 x5 --- x6 x9 --- x10 + \ /\ /\ / \ / \ / + 1 \ / R\ /-1\ / R \ / 1 \ / + \/ \/ \/ \/ \/ + /\ /\ /\ /\ /\ + 1 / \ R/ \ 1/ \ R / \ 1 / \ + / \/ \/ \ / \ / 0 \ + x1 --- x3 x4 --- x7 x8 --- x11 + -1 1 + + The example described in Fig. 
3 of + https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 0 ); + + nlr.setBias( 5, 0, 1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + + nlr.addActivationSource( 3, 0, 4, 1 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 8 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkDeepPolyLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 LR 1 LR 1 1 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 1 \ / 0 \ / + \/ \/ \/ + /\ /\ /\ + 1 / \ 1 / \ 1 / \ + / \ LR / \ LR / 1 \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + -1 -1 + + The example described in Fig. 
3 of + https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + using LeakyReLU activation instead of ReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.2 ); + nlr.getLayer( 4 )->setAlpha( 0.2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, 1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 0 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + + nlr.setBias( 5, 0, 1 ); + + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkDeepPolySigmoidsAndRound( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + + 1 S 1 Rd + x0 --- x2 ---> x4 --- x6 --- x8 + \ / \ / + 1 \ / 1 \ / + \/ \/ + /\ /\ + 1 / \ 1 / \ + / \ S / \ Rd + x1 --- x3 ---> x5 --- x7 --- x9 + -1 -1 + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 4; ++i ) + nlr.addLayerDependency( i - 1, i ); 
+ + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, 1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + // Mark the Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Mark the Round sources + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 10 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + } + + void populateNetworkDeepPolyMax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 R Max 2 + x0 --- x2 ---> x4 --- x6 ---> x7 + \ / / + 1 \ / / + \/ / + /\ / + 1 / \ / + / \ R / + x1 --- x3 ---> x5 + -1 + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::MAX, 1 ); + nlr.addLayer( 4, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 4; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + nlr.setWeight( 3, 0, 4, 0, 2 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Mark the Max sources + nlr.addActivationSource( 2, 0, 3, 0 ); + nlr.addActivationSource( 2, 1, 3, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 7 ); + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 8 ); + tableau.setLowerBound( 2, 
-large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + } + + void populateNetworkDeepPolySoftmax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + x0 x3 S x6 + + x1 x4 S x7 + + x2 x5 S x8 + + x3 = x0 - x1 + x2 + 1 + x4 = -x0 + x1 + x2 + 2 + x5 = -x0 - x1 - x2 + 3 + + x6 x7 x8 = softmax(x3, x4, x5) + + x9 = x6 + x7 + x8 + x10 = - x6 - x7 - x8 + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -1 ); + nlr.setWeight( 0, 0, 1, 2, -1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 2, -1 ); + nlr.setWeight( 0, 2, 1, 0, 1 ); + nlr.setWeight( 0, 2, 1, 1, 1 ); + nlr.setWeight( 0, 2, 1, 2, -1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 1, 1, 2 ); + nlr.setBias( 1, 2, 3 ); + + // Mark the Softmax sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 8 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 11 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + } + + void populateNetworkDeepPolySoftmax2( 
NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + x0 x3 S x8 + + x1 x4 S x9 + + x2 x5 S x10 + + x6 S x11 + + x7 S x12 + + x3 = x0 - x1 + x2 + 1 + x4 = -x0 + x1 + x2 + 2 + x5 = -x0 - x1 - x2 + 3 + x6 = -x0 - x1 - x2 + 2 + x7 = -x0 - x1 - x2 + 1 + + x8 x10 x12 = softmax(x3, x5, x7) + + x9 x11 = softmax(x4, x6) + + x13 = x8 + x10 + x12 + x14 = -x8 - x10 - x12 + x15 = x9 + x11 + x16 = -x9 - x11 + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 5 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 5 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 4 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -1 ); + nlr.setWeight( 0, 0, 1, 2, -1 ); + nlr.setWeight( 0, 0, 1, 3, -1 ); + nlr.setWeight( 0, 0, 1, 4, -1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 2, -1 ); + nlr.setWeight( 0, 1, 1, 3, -1 ); + nlr.setWeight( 0, 1, 1, 4, -1 ); + nlr.setWeight( 0, 2, 1, 0, 1 ); + nlr.setWeight( 0, 2, 1, 1, 1 ); + nlr.setWeight( 0, 2, 1, 2, -1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + nlr.setWeight( 0, 2, 1, 4, -1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 4, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + nlr.setWeight( 2, 4, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 2, 1 ); + nlr.setWeight( 2, 3, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 3, -1 ); + nlr.setWeight( 2, 3, 3, 3, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 1, 1, 2 ); + nlr.setBias( 1, 2, 3 ); + nlr.setBias( 1, 3, 2 ); + nlr.setBias( 1, 4, 1 ); + + // Mark the Softmax sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 4, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 4, 2, 2 ); + nlr.addActivationSource( 1, 0, 2, 4 ); + nlr.addActivationSource( 1, 2, 2, 4 ); + nlr.addActivationSource( 1, 4, 2, 4 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 3 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 4 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 4 ), 12 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 13 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 3 ), 16 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 17 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); 
+ tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + } + + void populateNetworkDeepPolyBilinear( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + x0 x2 + x x4 -- x5 + x1 x3 + + x2 = x0 - 2 * x1 + x3 = x0 + x1 + x4 = -x5 + + x4 = x2 * x3 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -2 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, -1 ); + + // Mark the Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 5 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 6 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + } + + void test_deeppoly_relus_all_active2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Both ReLUs active, bound survive through activations: + + x2 <= x4 <= x2 + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 
: [11, 27] + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + => x2 - x3 <= x6 <= x2 - x3 + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ + + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_relus_active_and_inactive2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 - 30 + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive, bounds get zeroed + Second ReLU is active, bounds surive the activation + + 0 <= x4 <= 0 + x4.lb = 0 + x4.ub = 0 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + ==> -x3 <= x6 <= -x3 + x6.lb = -x0 - x1 : [-11, -5] + x6.ub = -x0 - x1 : [-11, -5] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_relus_active_and_not_fixed2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 - 15 + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is undecided, bound is concretized. 12 = ub > -lb = 4, using ReLU lower + coefficient of 1. 
Upper coefficient: 12/(12--4) = 12/16 = 0.75 + Second ReLU is active, bounds surive the activation + + x4 range: [-4, 12] + x2 <= x4 <= 0.75 x2 + 3 + x4.lb = 2x0 + 3x1 - 15 + x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + ==> x2 - x3 <= x6 <= 0.75x2 - x3 + 3 + x6.lb = x0 + 2x1 - 15 + x6.ub = 0.5x0 + 1.25x1 - 8.25 + + x6 range: [4 + 2 - 15 = -9, 3 + 6.25 - 8.25 = 1] = [-9, 1] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -4, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -9, Tightening::LB ), + Tightening( 6, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_relus_active_and_externally_fixed2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. + nlr.setBias( 1, 0, -15 ); + + // However, one of the ReLU's variables has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = -3 + x2 is eliminated, everything set to -3 + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive (set externally), bounds get zeroed + Second ReLU is active, bounds surive the activation + + 0 <= x4 <= 0 + x4.lb = 0 + x4.ub = 0 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + ==> -x3 <= x6 <= -x3 + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_relu_residual3() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyReluResidual1( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [-1, 1] + + Layers 1. 
2: + + x1 = x0 + x1.lb = x0 : [-1, 1] + x1.ub = x0 : [-1, 1] + + ReLU is undecided, bound is concretized. 1 = ub <= -lb = 1, using ReLU lower + coefficient of 0. Upper coefficient: 1/( 1--1 ) = 1/2 = 0.5 + + 0 <= x2 <= 0.5x1 + 0.5 + x2.lb = 0 + x2.ub = 0.5x0 + 0.5 + x2 range: [0, 1] + + Layers 3, 4 (with residual from x0): + + x3 = - x2 - x0 + 1 + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 2] + x3.ub = -1( 0 ) -1x0 + 1 = -x0 + 1 : [0, 2] + x3 range: [-1, 2] + + ReLU is undecided, bound is concretized. 2 = ub > -lb = 1, using ReLU lower + coefficient of 1. Upper coefficient: 2/( 2--1 ) = 2/3. + + x3 <= x4 <= 2/3 x3 + 2/3 + x4.lb = -1.5x0 + 0.5 + x4.ub = 2/3 ( -x0 + 1 ) + 2/3 = -2/3 x0 + 4/3 : [1, 2] + x4 range: [-1, 2] + + Layer 5 (with residual from x1): + + x5 = 3x4 + 3x1 + 1 + x5.lb = 3 ( -1.5x0 + 0.5 ) + 3 ( x0 ) + 1 = -1.5x0 + 2.5 : [1, 4] + x5.ub = 3 ( -2/3 x0 + 4/3 ) + 3 ( x0 ) + 1 = x0 + 5 : [4, 6] + x5 range: [1, 6] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 6, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_relu_residual4() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyReluResidual2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [-1, 1] + + Layers 1, 2: + + x1 = x0 + x1.lb = x0 : [-1, 1] + x1.ub = x0 : [-1, 1] + + ReLU is undecided, bound is concretized. 1 = ub <= -lb = 1, using ReLU lower + coefficient of 0. Upper coefficient: 1/( 1--1 ) = 1/2 = 0.5 + + 0.5 x1 <= x2 <= 0.5x1 + 0.5 + x2.lb = 0 + x2.ub = 0.5x0 + 0.5 + x2 range: [0, 1] + + Layers 3, 4 (with residual from x0): + + x3 = - x2 - x0 + 1 + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 2] + x3.ub = -1( 0 ) -1x0 + 1 = -x0 + 1 : [0, 2] + x3 range: [-1, 2] + + ReLU is undecided, bound is concretized. 2 = ub > -lb = 1, using ReLU lower + coefficient of 1. Upper coefficient: 2/( 2--1 ) = 2/3. 
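+
+            (General pattern behind these coefficients, stated here as a hedged
+            summary of what the hand computations in these comments follow: for an
+            undecided ReLU over input bounds [lb, ub] with lb < 0 < ub, the upper
+            relaxation is y <= ub/( ub - lb ) * ( x - lb ), and the lower relaxation
+            uses slope 1 ( y >= x ) when ub > -lb and slope 0 ( y >= 0 ) otherwise.
+            The values above, lower coefficient 1 and upper coefficient
+            2/( 2--1 ) = 2/3, instantiate exactly this rule.)
+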
+ + x3 <= x4 <= 2/3 x3 + 2/3 + x4.lb = -1.5x0 + 0.5 + x4.ub = 2/3 ( -x0 + 1 ) + 2/3 = -2/3 x0 + 4/3 : [1, 2] + x4 range: [-1, 2] + + Layer 5 (with residual from x0): + + x5 = 3x4 + x0 + 1 + x5.lb = 3 ( -1.5x0 + 0.5 ) + 1 ( x0 ) + 1 = -3.5x0 + 2.5 : [-1, 6] + x5.ub = 3 ( -2/3 x0 + 4/3 ) + 1 ( x0 ) + 1 = -x0 + 5 : [4, 6] + x5 range: [-1, 6] + + Layer 6: + x6 = x5 + x6.lb = -3.5x0 + 2.5 : [-1, 6] + x6.ub = -x0 + 5 : [4, 6] + x6 range: [-1, 6] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), + Tightening( 5, 6, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), + Tightening( 6, 6, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_relu_reindex2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyReluReindex( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 1] + + Layers 1, 2: + + x2 = x0 + x1 + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3 = x0 - x1 + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] + + Both ReLUs are undecided, bounds are concretized. 2 = ub <= -lb = 2, using ReLU lower + coefficient of 0. Upper coefficient: 2/( 2--2 ) = 2/4 = 0.5 + + 0 <= x4 <= 0.5x2 + 1 + x4.lb = 0 + x4.ub = 0.5 ( x0 + x1 ) + 1 = 0.5x0 + 0.5x1 + 1 + x4 range: [0, 2] + + 0 <= x5 <= 0.5x3 + 1 + x5.lb = 0 + x5.ub = 0.5 ( x0 - x1 ) + 1 = 0.5x0 - 0.5x1 + 1 + x5 range: [0, 2] + + Layers 3, 4: + + x6 = x4 + x5 + x6.lb = 1 ( 0 ) + 1 ( 0 ) = 0 : [0, 0] + x6.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) + 1 ( 0.5x0 - 0.5x1 + 1 ) = x0 + 2 : [1, 3] + x6 range: [0, 3] + + x7 = x4 - x5 + x7.lb = 1 ( 0 ) - 1 ( 0.5x0 - 0.5x1 + 1 ) = - 0.5x0 + 0.5x1 - 1 : [-2, 0] + x7.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) - 1 ( 0 ) = 0.5x0 + 0.5x1 + 1 : [0, 2] + x7 range: [-2, 2] + + First ReLU is active, bounds surive the activation + Second ReLUs is undecided, bound is concretized. 2 = ub <= -lb = 2, using ReLU lower + coefficient of 0. 
Upper coefficient (second ReLU): 2/( 2--2 ) = 2/4 = 0.5 + + x6 <= x8 <= x6 + x8.lb = 0 + x8.ub = x0 + 2 + x8 range: [0, 3] + + 0 <= x9 <= 0.5 x7 + 1 + x9.lb = 0 + x9.ub = 0.5 ( 0.5x0 + 0.5x1 + 1 ) + 1 = 0.25x0 + 0.25x1 + 1.5 + x9 range: [0, 2] + + Layer 5: + x10 = x8 + x9 + 1 + x10.lb = 1 ( 0 ) + 1 ( 0 ) + 1 = 1 : [1, 1] + x10.ub = 1 ( x6 ) + 1 ( 0.5 x7 + 1 ) + 1 = 1 ( x4 + x5 ) + 1 ( 0.5 x4 - 0.5x5 + 1 ) + 1 + = 1.5x4 + 0.5x5 + 2 <= 0.75x2 + 0.25x3 + 4 = x0 + 0.5x1 + 4 : [2.5, 5.5] + x10 range: [1, 5.5] + + x11 = x9 + x11.lb = 0 + x11.ub = 0.25x0 + 0.25x1 + 1.5 + x11 range: [0, 2] + + */ + + List expectedBounds( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, 1, Tightening::LB ), Tightening( 10, 5.5, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 2, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_abs_all_positive2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + 
TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Both absolute values positive, bound survive through activations: + + x2 <= x4 <= x2 + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + x5 = x4 - x5 + => x2 - x3 <= x5 <= x2 - x3 + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ + + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_abs_positive_and_negative2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + x2 = 2x0 + 3x1 - 30 + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3 = x0 + x1 + 
x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is negative, bounds get flipped + Second absolute value is positive, bounds surive the activation + + -x2 <= x4 <= -x2 + x4.lb = -2x0 -3x1 + 30 : [3, 19] + x4.ub = -2x0 -3x1 + 30 : [3, 19] + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + x5 = x4 - x5 + => -x2 - x3 <= x5 <= -x2 - x3 + x6.lb = - 3x0 - 4x1 + 30 : [-8, 14] + x6.ub = - 3x0 - 4x1 + 30 : [-8, 14] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 19, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 14, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_absolute_values_positive_and_not_fixed2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + x2 = 2x0 + 3x1 - 15 + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is undecided, bounds are concretized. 
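+
+            (For an undecided absolute value over [lb, ub] with lb < 0 < ub, the
+            concretized output range works out to [0, max( -lb, ub )]; here that is
+            [0, max( 4, 12 )] = [0, 12], matching the x4 range below.)
+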
+ Second absolute value is active, bounds surive the activation + + 0 <= x4 <= 12 + x4 range: [0, 12] + x4.lb = 0 + x4.ub = 12 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + => -x3 <= x6 <= -x3 + 12 + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 + 12 : [ 1, 7] + + x6 range: [-11, 7] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, 7, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_absolute_values_active_and_externally_fixed2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
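+        // (With this bias, x2 = 2x0 + 3x1 - 15 would span [-4, 12] over the given
+        // input box, straddling zero, so the Abs phase would be undecided if x2 were
+        // not eliminated below.)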
+ nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = -3 + x2 is eliminated, everything set to -3 + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is negative, bounds get flipped + Second absolute value is positive, bounds surive the activation + + -x2 <= x4 <= -x2 + x4: all set to 3 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + => -x2 - x3 <= x6 <= -x2 - x3 + => -x3 + 3 <= x6 <= -x3 + 3 + x6.lb = - x0 - x1 + 3 : [-8, -2] + x6.ub = - x0 - x1 + 3 : [-8, -2] + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_signs_positive_and_not_fixed2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( 
nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 - 15 + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First sign is undecided, bounds are concretized. + Second sign is active, bounds become constant 1 + Coefficient (first Sign, lower): 2/12 = 1/6. + Coefficient (first Sign, upper): -2/-4 = 1/2. + + 1/6 x2 - 1 <= x4 <= 1/2 x2 + 1 + x4.lb = 1/6 ( 2x0 + 3x1 - 15 ) - 1 = 2/6 x0 + 3/6 x1 - 21/6 + x4.ub = 1/2 ( 2x0 + 3x1 - 15 ) + 1 = x0 + 1.5x1 - 6.5 + x4 range: [-1, 1] + + 1 <= x5 <= 1 + x5.lb = 1 + x5.ub = 1 + x5 range: [1, 1] + + Layer 3: + + x6 = x4 - x5 : [-2, 0] + => 1/6 x2 - 2 <= x6 <= 1/2 x2 : [-8/3, 6] + x6.lb = 1 ( 2/6 x0 + 3/6 x1 - 21/6 ) - 1 ( 1 ) = 1/3 x0 + 1/2 x1 - 4.5 : [-16/6, 0] + x6.ub = 1 ( x0 + 1.5x1 - 6.5 ) - 1 ( 1 ) = x0 + 1.5x1 - 7.5 : [-2, 6] + + x6 range: [-2, 0] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), + Tightening( 6, 0, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_signs_active_and_externally_fixed2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). 
Should make the node unfixed. + nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = -3 + x2 is eliminated, everything set to -3 + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First sign is negative, bounds become constant -1 + Second sign is positive, bounds become constant 1 + + -1 <= x4 <= 1 + x4: all set to -1 + + 1 <= x5 <= 1 + x5: all set to 1 + + Layer 3: + + x6 = x5 - x4 + x6.lb = 1 ( -1 ) - 1 ( 1 ) = -2 + x6.ub = 1 ( -1 ) - 1 ( 1 ) = -2 + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, -1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_leaky_relu2() + { + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyLeakyReLU( nlr, tableau ); // alpha = 0.2 + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 1] + + Layers 1, 2: + + x2 = x0 + x1 + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3 = x0 - x1 + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] + + Both LeakyReLUs are undecided, bounds are concretized. + Coefficient: ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6 + Bias: ( 0.2 - 1 ) * 2 * -2 / ( 2--2 ) = 0.8 + + x2 <= x4 <= 0.6 x2 + 0.8 + x4.lb = x0 + x1 + x4.ub = 0.6 ( x0 + x1 ) + 0.8 = 0.6x0 + 0.6x1 + 0.8 + x4 range: [-2, 2] + + x3 <= x5 <= 0.6 x3 + 0.8 + x5.lb = x0 - x1 + x5.ub = 0.6 ( x0 - x1 ) + 0.8 = 0.6x0 - 0.6x1 + 0.8 + x5 range: [-2, 2] + + Layers 3, 4: + + x6 = x4 + x5 + => x2 + x3 <= x6 <= 0.6 x2 + 0.6 x3 + 1.6 + x6.lb = 1 ( x0 + x1 ) + 1 ( x0 - x1 ) = 2x0 : [-2, 2] + x6.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) + 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2x0 + 1.6 : [0.4, 2.8] + x6 range: [-2, 2.8] + + x7 = x4 - x5 + => x2 - 0.6x3 - 0.8 <= x6 <= 0.6 x2 - x3 + 0.8 + x7.lb = 1 ( x0 + x1 ) - 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] + x7.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) - 1 ( x0 - x1 ) = -0.4x0 + 1.6x1 + 0.8 : [-1.2, 2.8] + x7 range: [-2.8, 2.8] + + Both LeakyReLUs are undecided, bounds are concretized. 
+ Coefficient (first LeakyReLU): ( 2.8 - 0.2*-2 )/( 2.8--2 ) = 3.2/4.8 = 10/15 = 2/3 + Bias (first LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2 / ( 2.8--2 ) = 14/15 + + Coefficient (second LeakyReLU): ( 2.8 - 0.2*-2.8 )/( 2.8--2.8 ) = 3.36/5.6 = 0.6 + Bias (second LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2.8 / ( 2.8--2.8 ) = 1.12 + + x6 <= x8 <= 10/15 x6 + 14/15 + x8.lb = 2x0 + x8.ub = 10/15 ( 1.2x0 + 1.6 ) + 14/15 = 0.8x0 + 2 + x8 range: [-2, 2.8] + + x7 <= x9 <= 0.6x7 + 1.12 + x9.lb = 0.4x0 + 1.6x1 - 0.8 + x9.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 + x9 range: [-0.56, 2.8] + + Layer 5: + + x10 = x8 + x9 + 1 + => x6 + x7 + 1 <= x10 <= 2/3 x6 + 0.6 x7 + 229/75 + => 2x4 + 1 <= x10 <= 19/15 x4 + 1/15 x5 + 229/75 + => 2x2 + 1 <= x10 <= 0.76 x2 + 0.04 x3 + 4.12 + x10.lb = 2x0 + 2x1 + 1 : [-3, 5] + x10.ub = 0.8 x0 + 0.72 x1 + 4.12 : [2.6, 5.64] + x10 range: [-3, 5.64] + + x11 = x9 + => x7 <= x11 <= 0.6x7 + 1.12 + => x4 - x5 <= x11 <= 0.6x4 - 0.6x5 + 1.12 + => x2 - 0.6x3 - 0.8 <= x11 <= 0.36 x2 - 0.6 x3 + 1.6 + x11.lb = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] + x11.ub = -0.24 x0 + 0.96 x1 + 1.6 : [0.4, 2.8] + x11 range: [-2.8, 2.8] + */ + + List expectedBounds( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -2, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -2, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2.8, Tightening::UB ), + Tightening( 7, -2.8, Tightening::LB ), Tightening( 7, 2.8, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 2.8, Tightening::UB ), + Tightening( 9, -2.8, Tightening::LB ), Tightening( 9, 2.8, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 5.64, Tightening::UB ), + Tightening( 11, -2.8, Tightening::LB ), Tightening( 11, 2.8, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_sigmoids_and_round2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolySigmoidsAndRound( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + // Layer 1 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 0 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 0 ), 2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 1 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 1 ), 2, 0.00001 ) ); + + // Layer 2 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 0 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 0 ), 0.8807, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 1 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 1 ), 0.8807, 0.0001 ) ); + + // Layer 3 + /* + Double-check with Python + --- + from math import exp as e + def g(x): 
+ return 1 / (1 + e(-x)) + + def g_prime(x): + return g(x) * (1 - g(x)) + + def lam(l, u): + return (g(u) - g(l)) / (u - l) + + def lam_prime(l, u): + return min(g_prime(l), g_prime(u)) + + l3 = l4 = -2 + u3 = u4 = 2 + l5 = l6 = g(-2) + u5 = u6 = g(2) + lambda7 = lam(l3, u3) + lambda7_prime = lam_prime(l3, u3) + lambda8 = lam(l4, u4) + lambda8_prime = lam_prime(l4, u4) + x7_l = lambda7_prime * (-2) + g(-2) + g(-2) - lambda7_prime * (-2 + -2) + x7_u = lambda7_prime * (2) + g(2) + g(2) -lambda7_prime * (2 + 2) + x8_l = lambda8_prime * (-2) + g(-2) - g(2) - lambda8_prime * (-2 - 2) + x8_u = lambda8_prime * (2) + g(2) - g(-2) -lambda8_prime * (2 - -2) + print(x7_l) + print(x7_u) + print(x8_l) + print(x8_u) + + ''' + Sigmoid linear relaxation ( Layer 2 ): + x4 >= lambda7_prime * x2 + ( g(l3) - lambda7_prime * l3 ) + x4 <= lambda7_prime * x2 + ( g(u3) - lambda7_prime * u3 ) + x5 >= lambda8_prime * x3 + ( g(l4) - lambda8_prime * l4 ) + x5 <= lambda8_prime * x3 + ( g(u4) - lambda7_prime * u4 ) + ''' + print('------------------') + print(lambda7_prime) + print(lambda8_prime) + print(g(l3) - lambda7_prime * l3) + print(g(u3) - lambda7_prime * u3) + print(g(l4) - lambda8_prime * l4) + print(g(u4) - lambda8_prime * u4) + + --- + [output]: + 0.4483930148512481 + 1.5516069851487517 + -0.5516069851487517 + 0.5516069851487517 + ------------------ + 0.1049935854035065 + 0.1049935854035065 + 0.3291900928291306 + 0.6708099071708693 + 0.3291900928291306 + 0.6708099071708693 + */ + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 0 ), 0.4483, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 0 ), 1.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 1 ), -0.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 1 ), 0.5516, 0.0001 ) ); + + // Layer 4 + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 0 ), 0 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 0 ), 2 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 1 ), -1 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 1 ), 1 ); + } + + void test_deeppoly_max_not_fixed2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyMax( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 2] + + Layers 1, 2, 3: + + x2 = x0 + x1 + x2.lb = x0 + x1 : [-2, 3] + x2.ub = x0 + x1 : [-2, 3] + + x3 = x0 - x1 + x3.lb = x0 - x1 : [-3, 2] + x3.ub = x0 - x1 : [-3, 2] + + Both ReLUs are undecided, bounds are concretized. + First ReLU: 3 = ub > -lb = 2, using lower ReLU coefficient of 1. + Upper coefficient (first ReLU): 3/( 3--2 ) = 3/5 = 0.6. + First ReLU: 2 = ub <= -lb = 3, using lower ReLU coefficient of 0. + Upper coefficient (second ReLU): 2/( 2--3 ) = 2/5 = 0.4 + + x2 <= x4 <= 0.6 x2 + 1.2 + x4.lb = x0 + x1 + x4.ub = 0.6 ( x0 + x1 ) + 1.2 = 0.6x0 + 0.6x1 + 1.2 + x4 range: [-2, 3] + + 0 <= x5 <= 0.4 x3 + 1.2 + x5.lb = 0 + x5.ub = 0.4 ( x0 - x1 ) + 1.2 = 0.4x0 + 0.4x1 + 1.2 + x5 range: [0, 2] + + Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub + Max inherits lower bound from x5, and its upper bound is constant 3. 
+ + x5 <= x6 <= 3 + x6.lb = 0 : [0, 0] + x6.ub = 3 : [3, 3] + x6 range: [0, 3] + + Layer 4: + + x7 = 2x6 + => 2x5 <= x7 <= 6 + x7.lb = 2 ( 0 ) = 0 : [0, 0] + x7.ub = 2 ( 3 ) = 6 : [6, 6] + x7 range: [0, 6] + */ + + List expectedBounds( { + Tightening( 2, -2, Tightening::LB ), + Tightening( 2, 3, Tightening::UB ), + Tightening( 3, -3, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -2, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), + Tightening( 6, 3, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), + Tightening( 7, 6, Tightening::UB ), + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_max_fixed2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyMax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -3 ); + tableau.setUpperBound( 1, -2 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [1, 2] + x1: [-3, -2] + + Layer 1: + + x2 = x0 + x1 + x2.lb = x0 + x1 : [-2, 0] + x2.ub = x0 + x1 : [-2, 0] + + x3 = x0 - x1 + x3.lb = x0 - x1 : [3, 5] + x3.ub = x0 - x1 : [3, 5] + + First ReLU is negative, bounds become constant 0 + Second ReLU is positive, bounds survive the activation + + 0 <= x4 <= 0 + x4: all set to 0 + + x3 <= x5 <= x3 + x5.lb = x0 - x1 : [3, 5] + x5.ub = x0 - x1 : [3, 5] + + Max is fixed because x5.lb > x4.ub, it inherits x5's bounds + + x5 <= x6 <= x5 + => x3 <= x6 <= x5 + x6.lb = x0 - x1 : [3, 5] + x6.ub = x0 - x1 : [3, 5] + + Layer 3: + + x7 = 2x6 + => x7 = 2x5 = 2x3 = 2x0 - 2x1 + x7.lb = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] + x7.ub = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] + */ + + List expectedBounds( { + Tightening( 2, -2, Tightening::LB ), + Tightening( 2, 0, Tightening::UB ), + Tightening( 3, 3, Tightening::LB ), + Tightening( 3, 5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 3, Tightening::LB ), + Tightening( 5, 5, Tightening::UB ), + Tightening( 6, 3, Tightening::LB ), + Tightening( 6, 5, Tightening::UB ), + Tightening( 7, 6, Tightening::LB ), + Tightening( 7, 10, Tightening::UB ), + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_softmax4() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolySoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + } + + void test_deeppoly_softmax5() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + { + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, 
"lse" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolySoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + + Layer 1: + + x3 = x0 - x1 + x2 + 1 + x3.lb = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3.ub = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3 range: [ 1.999999, 2.000002 ] + + x4 = -x0 + x1 + x2 + 2 + x4.lb = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4.ub = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4 range: [ 2.999999, 3.000002 ] + + x5 = -x0 - x1 - x2 + 3 + x5.lb = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5.ub = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5 range: [ -0.000003, 0 ] + */ + + unsigned size = nlr.getLayer( 2 )->getSize(); + Vector sourceLbs = { 1.999899, 2.999899, -0.000003 }; + Vector sourceUbs = { 2.000102, 3.000102, 0.0001 }; + Vector sourceMids = { 2.0000005, 3.0000005, -0.0000015 }; + Vector targetLbs( size, 0 ); + Vector targetUbs( size, 0 ); + Vector symbolicLb( size * size, 0 ); + Vector symbolicUb( size * size, 0 ); + Vector symbolicLowerBias( size, 0 ); + Vector symbolicUpperBias( size, 0 ); + for ( unsigned i = 0; i < size; ++i ) + { + targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i ); + targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i ); + } + for ( unsigned i = 0; i < size; ++i ) + { + symbolicLowerBias[i] = + NLR::Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, i ); // Using lse2 + symbolicUpperBias[i] = + NLR::Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, i ); + for ( unsigned j = 0; j < size; ++j ) + { + symbolicLb[size * j + i] = + NLR::Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, i, j ); + symbolicUb[size * j + i] = + NLR::Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, i, j ); + symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j]; + symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j]; + } + } + TS_ASSERT( compareVectors( targetLbs, Vector( { 0.2595, 0.7054, 0.0351 } ) ) ); + TS_ASSERT( compareVectors( targetUbs, Vector( { 0.2595, 0.7054, 0.0351 } ) ) ); + TS_ASSERT( compareVectors( symbolicLb, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ) ) ); + TS_ASSERT( compareVectors( symbolicUb, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ) ) ); + TS_ASSERT( + compareVectors( symbolicLowerBias, Vector( { 0.4243, 0.4481, 0.1277 } ) ) ); + TS_ASSERT( + compareVectors( symbolicUpperBias, Vector( { 0.4243, 0.4480, 0.1277 } ) ) ); + + /* + Layer 2: + +0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 + x6.lb = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 + x6.ub = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 + x6 range: [ 0.2595, 0.2595 ] + +-0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4480 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 + x7.lb = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6062 + x7.ub = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6063 + x7 range: [ 0.7054, 0.7054 ] + +-0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 
0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277
+              x8.lb = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707
+              x8.ub = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707
+              x8 range: [ 0.0351, 0.0351 ]
+
+              Layer 3:
+
+              x9 = x6 + x7 + x8
+              => x9 = ( 0.1922 - 0.1830 - 0.0091 ) x3 + ( -0.1830 + 0.2078 - 0.0248 ) x4 + (
+              -0.0091 - 0.0248 + 0.0339 ) x5 + ( 0.4243 + 0.4481 + 0.1277 )
+
+              => x9 = 0.0001 x3 + 0 x4 + 0 x5 + 1.0001
+              => ( Up to rounding ) 1 <= x9 <= 1.
+              x9.lb = 1
+              x9.ub = 1
+              x9 range: [ 1, 1 ]
+
+              x10 = - x6 - x7 - x8
+              => x10 = - ( 0.1922 - 0.1830 - 0.0091 ) x3 - ( -0.1830 + 0.2078 - 0.0248 ) x4 - (
+              -0.0091 - 0.0248 + 0.0339 ) x5 - ( 0.4243 + 0.4481 + 0.1277 )
+
+              => x10 = - 0.0001 x3 - 0.0000 x4 - 0.0000 x5 - 1.0001
+              => ( Up to rounding ) -1 <= x10 <= -1.
+              x10.lb = -1
+              x10.ub = -1
+              x10 range: [ -1, -1 ]
+            */
+
+            List<Tightening> expectedBounds( { Tightening( 3, 2, Tightening::LB ),
+                                               Tightening( 3, 2, Tightening::UB ),
+                                               Tightening( 4, 3, Tightening::LB ),
+                                               Tightening( 4, 3, Tightening::UB ),
+                                               Tightening( 5, 0, Tightening::LB ),
+                                               Tightening( 5, 0, Tightening::UB ),
+                                               Tightening( 6, 0.2595, Tightening::LB ),
+                                               Tightening( 6, 0.2595, Tightening::UB ),
+                                               Tightening( 7, 0.7054, Tightening::LB ),
+                                               Tightening( 7, 0.7054, Tightening::UB ),
+                                               Tightening( 8, 0.0351, Tightening::LB ),
+                                               Tightening( 8, 0.0351, Tightening::UB ),
+                                               Tightening( 9, 1, Tightening::LB ),
+                                               Tightening( 9, 1, Tightening::UB ),
+                                               Tightening( 10, -1, Tightening::LB ),
+                                               Tightening( 10, -1, Tightening::UB )
+
+            } );
+
+            List<Tightening> bounds;
+            TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+            TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+        }
+        {
+            Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "er" );
+            NLR::NetworkLevelReasoner nlr;
+            MockTableau tableau;
+            nlr.setTableau( &tableau );
+            populateNetworkDeepPolySoftmax( nlr, tableau );
+
+            tableau.setLowerBound( 0, 1 );
+            tableau.setUpperBound( 0, 1.000001 );
+            tableau.setLowerBound( 1, 1 );
+            tableau.setUpperBound( 1, 1.000001 );
+            tableau.setLowerBound( 2, 1 );
+            tableau.setUpperBound( 2, 1.000001 );
+
+            // Invoke DeepPoly
+            TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+            TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) );
+
+            /*
+              Input ranges:
+
+              x0: [1, 1.000001]
+              x1: [1, 1.000001]
+              x2: [1, 1.000001]
+
+              Layer 1:
+
+              x3 = x0 - x1 + x2 + 1
+              x3.lb = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ]
+              x3.ub = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ]
+              x3 range: [ 1.999999, 2.000002 ]
+
+              x4 = -x0 + x1 + x2 + 2
+              x4.lb = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ]
+              x4.ub = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ]
+              x4 range: [ 2.999999, 3.000002 ]
+
+              x5 = -x0 - x1 - x2 + 3
+              x5.lb = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ]
+              x5.ub = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ]
+              x5 range: [ -0.000003, 0 ]
+            */
+
+            unsigned size = nlr.getLayer( 2 )->getSize();
+            Vector<double> sourceLbs = { 1.999899, 2.999899, -0.000003 };
+            Vector<double> sourceUbs = { 2.000102, 3.000102, 0.0001 };
+            Vector<double> sourceMids = { 2.0000005, 3.0000005, -0.0000015 };
+            Vector<double> targetLbs( size, 0 );
+            Vector<double> targetUbs( size, 0 );
+            Vector<double> symbolicLb( size * size, 0 );
+            Vector<double> symbolicUb( size * size, 0 );
+            Vector<double> symbolicLowerBias( size, 0 );
+            Vector<double> symbolicUpperBias( size, 0 );
+            for ( unsigned i = 0; i < size; ++i )
+            {
+                targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i );
+                targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i );
+            }
+            for ( unsigned i = 0; i < size; ++i )
+            {
+                symbolicLowerBias[i] =
+                    NLR::Layer::ERLowerBound( sourceMids,
sourceLbs, sourceUbs, i ); // Using er + symbolicUpperBias[i] = + NLR::Layer::ERUpperBound( sourceMids, targetLbs, targetUbs, i ); + for ( unsigned j = 0; j < size; ++j ) + { + symbolicLb[size * j + i] = + NLR::Layer::dERLowerBound( sourceMids, sourceLbs, sourceUbs, i, j ); + symbolicUb[size * j + i] = + NLR::Layer::dERUpperBound( sourceMids, targetLbs, targetUbs, i, j ); + symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j]; + symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j]; + } + } + TS_ASSERT( compareVectors( targetLbs, Vector( { 0.2595, 0.7054, 0.0351 } ) ) ); + TS_ASSERT( compareVectors( targetUbs, Vector( { 0.2595, 0.7054, 0.0351 } ) ) ); + TS_ASSERT( compareVectors( symbolicLb, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ) ) ); + TS_ASSERT( compareVectors( symbolicUb, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ) ) ); + TS_ASSERT( + compareVectors( symbolicLowerBias, Vector( { 0.4243, 0.4481, 0.1277 } ) ) ); + TS_ASSERT( + compareVectors( symbolicUpperBias, Vector( { 0.4243, 0.4480, 0.1277 } ) ) ); + + /* + Layer 2: + +0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 + x6.lb = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 + x6.ub = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 + x6 range: [ 0.2595, 0.2595 ] + +-0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4480 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 + x7.lb = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6062 + x7.ub = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6063 + x7 range: [ 0.7054, 0.7054 ] + +-0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= 0.1922 x3 -0.0248 x4 + 0.0339 x5 + 0.1277 + x8.lb = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707 + x8.ub = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707 + x8 range: [ 0.0351, 0.0351 ] + + Layer 3: + + x9 = x6 + x7 + x8 + => x9 = ( 0.1922 - 0.1830 - 0.0091 ) x3 + ( -0.1830 + 0.2078 - 0.0248 ) x4 + ( + -0.0091 - 0.0248 + 0.0339 ) x5 + ( 0.4243 + 0.4481 + 0.1277 ) + + => x9 = 0.0001 x3 + 0 x4 + 0 x5 + 1.0001 + => ( Up to rounding ) 1 <= x9 <= 1. + x9.lb = 1 + x9.ub = 1 + x9 range: [ 1, 1 ] + + x10 = - x6 - x7 - x8 + => x10 = - ( 0.1922 - 0.1830 - 0.0091 ) x3 - ( -0.1830 + 0.2078 - 0.0248 ) x4 - ( + -0.0091 - 0.0248 + 0.0339 ) x5 - ( 0.4243 + 0.4481 + 0.1277 ) + + => x10 = - 0.0001 x3 - 0.0000 x4 - 0.0000 x5 - 1.0001 + => ( Up to rounding ) 1 <= x10 <= 1. 
+ x10.lb = 1 + x10.ub = 1 + x10 range: [ -1, -1 ] + */ + List expectedBounds( { Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, 0.2595, Tightening::LB ), + Tightening( 6, 0.2595, Tightening::UB ), + Tightening( 7, 0.7054, Tightening::LB ), + Tightening( 7, 0.7054, Tightening::UB ), + Tightening( 8, 0.0351, Tightening::LB ), + Tightening( 8, 0.0351, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), + Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), + Tightening( 10, -1, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + } + + void test_deeppoly_softmax6() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolySoftmax2( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.00001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.00001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.00001 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + + Layer 1: + + x3 = x0 - x1 + x2 + 1 + x3.lb = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3.ub = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3 range: [ 1.999999, 2.000002 ] + + x4 = -x0 + x1 + x2 + 2 + x4.lb = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4.ub = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4 range: [ 2.999999, 3.000002 ] + + x5 = -x0 - x1 - x2 + 3 + x5.lb = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5.ub = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5 range: [ -0.000003, 0 ] + + x6 = -x0 - x1 - x2 + 2 + x6.lb = -x0 - x1 - x2 + 2 : [ -1.000003, -1 ] + x6.ub = -x0 - x1 - x2 + 2 : [ -1.000003, -1 ] + x6 range: [ -1.000003, -1 ] + + x7 = -x0 - x1 - x2 + 1 + x7.lb = -x0 - x1 - x2 + 1 : [ -2.000003, -2 ] + x7.ub = -x0 - x1 - x2 + 1 : [ -2.000003, -2 ] + x7 range: [ -2.000003, -2 ] + */ + + // First Sigmoid: x8 x10 x12 = softmax( x3, x5, x7 ). 
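+        // Sanity check (informal): x3, x5 and x7 are essentially fixed at ( 2, 0, -2 ), and
+        // softmax( 2, 0, -2 ) is approximately ( 0.8668, 0.1173, 0.0159 ), which is the value
+        // the expected targetLbs/targetUbs below encode.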
+ unsigned size = nlr.getLayer( 2 )->getActivationSources( 0 ).size(); + Vector sourceLbs = { 1.999899, -0.000003, -2.000103 }; + Vector sourceUbs = { 2.000102, 0.0001, -1.999 }; + Vector sourceMids = { 2.0000005, -0.0000015, -2.0000015 }; + Vector targetLbs( size, 0 ); + Vector targetUbs( size, 0 ); + Vector symbolicLb( size * size, 0 ); + Vector symbolicUb( size * size, 0 ); + Vector symbolicLowerBias( size, 0 ); + Vector symbolicUpperBias( size, 0 ); + for ( unsigned i = 0; i < size; ++i ) + { + targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i ); + targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i ); + } + for ( unsigned i = 0; i < size; ++i ) + { + symbolicLowerBias[i] = + NLR::Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, i ); // Using lse2 + symbolicUpperBias[i] = NLR::Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, i ); + for ( unsigned j = 0; j < size; ++j ) + { + symbolicLb[size * j + i] = + NLR::Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, i, j ); + symbolicUb[size * j + i] = + NLR::Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, i, j ); + symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j]; + symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j]; + } + } + TS_ASSERT( compareVectors( targetLbs, Vector( { 0.8668, 0.1173, 0.0159 } ) ) ); + TS_ASSERT( compareVectors( targetUbs, Vector( { 0.8668, 0.1173, 0.0159 } ) ) ); + TS_ASSERT( compareVectors( symbolicLb, + Vector( { 0.1155, + -0.1017, + -0.0138, + -0.1017, + 0.1035, + -0.0019, + -0.0138, + -0.0019, + 0.0156 } ) ) ); + TS_ASSERT( compareVectors( symbolicUb, + Vector( { 0.1154, + -0.1017, + -0.0138, + -0.1017, + 0.1036, + -0.0019, + -0.0138, + -0.0019, + 0.0156 } ) ) ); + TS_ASSERT( + compareVectors( symbolicLowerBias, Vector( { 0.6084, 0.3170, 0.0747 } ) ) ); + TS_ASSERT( + compareVectors( symbolicUpperBias, Vector( { 0.6084, 0.3170, 0.0747 } ) ) ); + + // Second Sigmoid: x9 x11 = softmax( x4, x6 ). 
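+        // Sanity check (informal): x4 and x6 are essentially fixed at ( 3, -1 ), and
+        // softmax( 3, -1 ) is approximately ( 0.9820, 0.0180 ), matching the expected
+        // targetLbs/targetUbs below.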
+        size = nlr.getLayer( 2 )->getActivationSources( 1 ).size();
+        sourceLbs = Vector<double>( { 2.999899, -1.000103 } );
+        sourceUbs = Vector<double>( { 3.000102, -0.9999 } );
+        sourceMids = Vector<double>( { 3.0000005, -1.0000015 } );
+        targetLbs = Vector<double>( size, 0 );
+        targetUbs = Vector<double>( size, 0 );
+        symbolicLb = Vector<double>( size * size, 0 );
+        symbolicUb = Vector<double>( size * size, 0 );
+        symbolicLowerBias = Vector<double>( size, 0 );
+        symbolicUpperBias = Vector<double>( size, 0 );
+        for ( unsigned i = 0; i < size; ++i )
+        {
+            targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i );
+            targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i );
+        }
+        for ( unsigned i = 0; i < size; ++i )
+        {
+            symbolicLowerBias[i] =
+                NLR::Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, i ); // Using lse2
+            symbolicUpperBias[i] = NLR::Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, i );
+            for ( unsigned j = 0; j < size; ++j )
+            {
+                symbolicLb[size * j + i] =
+                    NLR::Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, i, j );
+                symbolicUb[size * j + i] =
+                    NLR::Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, i, j );
+                symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j];
+                symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j];
+            }
+        }
+        TS_ASSERT( compareVectors( targetLbs, Vector<double>( { 0.9820, 0.0180 } ) ) );
+        TS_ASSERT( compareVectors( targetUbs, Vector<double>( { 0.9820, 0.0180 } ) ) );
+        TS_ASSERT(
+            compareVectors( symbolicLb, Vector<double>( { 0.0177, -0.0177, -0.0177, 0.0177 } ) ) );
+        TS_ASSERT(
+            compareVectors( symbolicUb, Vector<double>( { 0.0177, -0.0177, -0.0177, 0.0177 } ) ) );
+        TS_ASSERT( compareVectors( symbolicLowerBias, Vector<double>( { 0.9114, 0.0886 } ) ) );
+        TS_ASSERT( compareVectors( symbolicUpperBias, Vector<double>( { 0.9114, 0.0886 } ) ) );
+
+        /*
+          Layer 2:
+
+          First softmax: x8 x10 x12 = softmax( x3, x5, x7 ).
+0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1154 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084
+          x8.lb = 0.2310 x0 + 0.0001 x1 + 0.2310 x2 + 0.4051
+          x8.ub = 0.2310 x0 + 0.0000 x1 + 0.2310 x2 + 0.4050
+          x8 range: [ 0.8668, 0.8668 ]
+
+-0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + 0.3170
+          x10.lb = -0.2033 x0 + 0.0001 x1 - 0.2033 x2 + 0.5239
+          x10.ub = -0.2033 x0 + 0.0000 x1 - 0.2033 x2 + 0.5241
+          x10 range: [ 0.1173, 0.1173 ]
+
+-0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747
+          x12.lb = -0.0275 x0 + 0.0001 x1 - 0.0275 x2 + 0.0708
+          x12.ub = -0.0275 x0 + 0.0001 x1 - 0.0275 x2 + 0.0708
+          x12 range: [ 0.0159, 0.0159 ]
+
+          Second softmax: x9 x11 = softmax( x4, x6 ).
+0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114
+          x9.lb = 0 x0 + 0.0354 x1 + 0.0354 x2 + 0.9114
+          x9.ub = 0 x0 + 0.0354 x1 + 0.0354 x2 + 0.9114
+          x9 range: [ 0.9820, 0.9820 ]
+
+-0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886
+          x11.lb = 0 x0 - 0.0354 x1 - 0.0354 x2 + 0.0886
+          x11.ub = 0 x0 - 0.0354 x1 - 0.0354 x2 + 0.0886
+          x11 range: [ 0.0180, 0.0180 ]
+
+          Layer 3:
+
+          x13 = x8 + x10 + x12
+          => x13 = ( 0.1155 - 0.1017 - 0.0138 ) x3 + ( -0.1017 + 0.1035 - 0.0019 ) x5 +
+          ( -0.0138 - 0.0019 + 0.0156 ) x7 + ( 0.6084 + 0.3170 + 0.0747 )
+
+          => x13 = 0 x3 - 0.0001 x5 - 0.0001 x7 + 1.0001
+          => ( Up to rounding ) 1 <= x13 <= 1.
+ x13.lb = 1 + x13.ub = 1 + x13 range: [ 1, 1 ] + + x14 = - x8 - x10 - x12 + => x14 = - ( 0.1155 - 0.1017 - 0.0138 ) x3 - ( -0.1017 + 0.1035 - 0.0019 ) x5 + - ( -0.0138 - 0.0019 + 0.0156 ) x7 - ( 0.6084 + 0.3170 + 0.0747 ) + + => x14 = 0 x3 + 0.0001 x5 + 0.0001 x7 - 1.0001 + => ( Up to rounding ) -1 <= x14 <= -1. + x14.lb = -1 + x14.ub = -1 + x14 range: [ -1, -1 ] + + x15 = x9 + x11 + => x15 = ( 0.0177 - 0.0177 ) x4 + ( -0.0177 + 0.0177 ) x6 + ( 0.9114 + 0.0886 ) + + => x15 = 0 x4 + 0 x6 + 1 + => ( Up to rounding ) 1 <= x15 <= 1. + x15.lb = 1 + x15.ub = 1 + x15 range: [ 1, 1 ] + + x16 = - x9 - x11 + => x16 = - ( 0.0177 - 0.0177 ) x4 - ( -0.0177 + 0.0177 ) x6 - ( 0.9114 + 0.0886 ) + + => x16 = 0 x4 + 0 x6 - 1 + => ( Up to rounding ) -1 <= x16 <= -1. + x16.lb = -1 + x16.ub = -1 + x16 range: [ -1, -1 ] + */ + + List expectedBounds( { + Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ), + Tightening( 8, 0.86681, Tightening::LB ), Tightening( 8, 0.86682, Tightening::UB ), + Tightening( 9, 0.98201, Tightening::LB ), Tightening( 9, 0.98201, Tightening::UB ), + Tightening( 10, 0.11731, Tightening::LB ), Tightening( 10, 0.11731, Tightening::UB ), + Tightening( 11, 0.017985, Tightening::LB ), Tightening( 11, 0.017986, Tightening::UB ), + Tightening( 12, 0.015875, Tightening::LB ), Tightening( 12, 0.015876, Tightening::UB ), + Tightening( 13, 1, Tightening::LB ), Tightening( 13, 1, Tightening::UB ), + Tightening( 14, -1, Tightening::LB ), Tightening( 14, -1, Tightening::UB ), + Tightening( 15, 1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, -1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_bilinear2() + { + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyBilinear( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -2 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [1, 2] + x1: [-2, 1] + + Layers 1, 2: + + x2 = x0 - 2x1 + x2.lb = x0 - 2x1 : [-1, 6] + x2.ub = x0 - 2x1 : [-1, 6] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [-1, 3] + x3.ub = x0 + x1 : [-1, 3] + + Coefficients for bilinear layer: + Lower bound: + alpha_l = x3.lb = -1 + beta = x2.lb = -1 + gamma_l = -x2.lb x3.lb = --1 * -1 = -1 + + Upper bound: + alpha_u = x3.ub = 3 + beta = x2.lb = -1 + gamma_u = -x2.lb x3.ub = --1 * 3 = 3 + + -x2 - x3 - 1 <= x4 <= 3x2 - x3 + 3 + x4.lb = -1 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + -1 = -2x0 + x1 - 1 : [-7, -2] + x4.ub = 3 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + 3 = 2x0 - 7x1 + 3 : [0, 21] + x4 range: [-6, 18] + + Layer 3: + + x5 = -x4 + => -3x2 + x3 - 3 <= x4 <= x2 + x3 + 1 + x5.lb = -1 ( 2x0 - 5x1 + 3 ) = -2x0 + 7x1 - 3 : [-21, 0] + x5.ub = -1 ( -2x0 + x1 - 1 ) = 2x0 - x1 + 1 : [2, 7] + x5 range: [-18, 6] + */ + + List expectedBounds( { Tightening( 2, -1, Tightening::LB ), + Tightening( 2, 6, Tightening::UB ), + Tightening( 
3, -1, Tightening::LB ), + Tightening( 3, 3, Tightening::UB ), + Tightening( 4, -6, Tightening::LB ), + Tightening( 4, 18, Tightening::UB ), + Tightening( 5, -18, Tightening::LB ), + Tightening( 5, 6, Tightening::UB ) } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_deeppoly_relus_all_active() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Both ReLUs active, bound survive through activations: + + x2 <= x4 <= x2 + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + => x2 - x3 <= x6 <= x2 - x3 + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ + + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_deeppoly_relus_active_and_inactive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 - 30 + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive, bounds get zeroed + Second ReLU is active, bounds surive the activation + + 0 <= x4 <= 0 + x4.lb = 0 + x4.ub = 0 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + ==> -x3 <= x6 <= -x3 + x6.lb = -x0 - x1 : [-11, -5] + x6.ub = -x0 - x1 : [-11, -5] + */ + + List expectedBounds( { + Tightening( 2, -19, 
Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_deeppoly_relus_active_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 - 15 + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is undecided, bound is concretized. Using custom ReLU lower + coefficient of 0.5. Upper coefficient: 12/(12--4) = 12/16 = 0.75 + Second ReLU is active, bounds surive the activation + + x4 range: [-2, 12] + 0.5 x2 <= x4 <= 0.75 x2 + 3 + x4.lb = 0.5 ( 2x0 + 3x1 - 15 ) = x0 + 1.5 x1 - 7.5 + x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + ==> 0.5 x2 - x3 <= x6 <= 0.75x2 - x3 + 3 + x6.lb = 0.5 x1 - 7.5 + x6.ub = 0.5x0 + 1.25x1 - 8.25 + + x6 range: [0.5 - 7.5 = -7, 3 + 6.25 - 8.25 = 1] = [-7, 1] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -2, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -7, Tightening::LB ), + Tightening( 6, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_deeppoly_relus_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
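+        // Note: the call to eliminateVariable below overrides this by fixing x2 to a constant,
+        // so the first ReLU is treated as externally fixed and the parameterised lower-bound
+        // coefficient should have no effect on it.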
+ nlr.setBias( 1, 0, -15 ); + + // However, one of the ReLU's variables has been eliminated + nlr.eliminateVariable( 2, -3 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = -3 + x2 is eliminated, everything set to -3 + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive (set externally), bounds get zeroed + Second ReLU is active, bounds surive the activation + + 0 <= x4 <= 0 + x4.lb = 0 + x4.ub = 0 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + ==> -x3 <= x6 <= -x3 + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_deeppoly_relu_residual1() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyReluResidual1( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) ); + + /* + Input ranges: + + x0: [-1, 1] + + Layers 1. 2: + + x1 = x0 + x1.lb = x0 : [-1, 1] + x1.ub = x0 : [-1, 1] + + ReLU is undecided, bound is concretized. Using custom ReLU lower + coefficient of 0.5. Upper coefficient: 1/( 1--1 ) = 1/2 = 0.5 + + 0.5 x1 <= x2 <= 0.5x1 + 0.5 + x2.lb = 0.5 x0 + x2.ub = 0.5 x0 + 0.5 + x2 range: [-0.5, 1] + + Layers 3, 4 (with residual from x0): + + x3 = - x2 - x0 + 1 + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5 x0 + 0.5 : [-1, 2] + x3.ub = -1( 0.5 x0 ) -1x0 + 1 = -1.5 x0 + 1 : [-0.5, 2.5] + x3 range: [-1, 2.5] + + ReLU is undecided, bound is concretized. Using custom ReLU lower + coefficient of 0.5. Upper coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. 
+ + 0.5 x3 <= x4 <= 5/7 x3 + 5/7 + x4.lb = 0.5 ( -1.5 x0 + 0.5 ) = -0.75 x0 + 0.25 : [-0.5, 1] + x4.ub = 5/7 ( -1.5 x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [1, 35/14 = 2.5] + x4 range: [-0.5, 2.5] + + Layer 5 (with residual from x1): + + x5 = 3x4 + 3x1 + 1 + x5.lb = 3 ( -0.75 x0 + 0.25 ) + 3 ( x0 ) + 1 = 0.75x0 + 1.75 : [1, 2.5] + x5.ub = 3 ( -15/14 x0 + 20/14 ) + 3 ( x0 ) + 1 = -3/14 x0 + 74/14 : [71/14, 77/14 = 5.5] + x5 range: [1, 5.5] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, -0.5, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2.5, Tightening::UB ), + Tightening( 4, -0.5, Tightening::LB ), + Tightening( 4, 2.5, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 5.5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_deeppoly_relu_residual2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyReluResidual2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) ); + + /* + Input ranges: + + x0: [-1, 1] + + Layers 1, 2: + + x1 = x0 + x1.lb = x0 : [-1, 1] + x1.ub = x0 : [-1, 1] + + ReLU is undecided, bound is concretized. Using custom ReLU lower + coefficient of 0.5. Upper cCoefficient: 1/( 1--1 ) = 1/2 = 0.5 + + 0.5 x1 <= x2 <= 0.5x1 + 0.5 + x2.lb = 0.5x0 + x2.ub = 0.5x0 + 0.5 + x2 range: [-0.5, 1] + + Layers 3, 4 (with residual from x0): + + x3 = - x2 - x0 + 1 + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 2] + x3.ub = -1( 0.5 x0 ) -1x0 + 1 = -1.5 x0 + 1 : [-0.5, 2.5] + x3 range: [-1, 2.5] + + ReLU is undecided, bound is concretized. Using custom ReLU lower + coefficient of 0.5. Upper coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. 
+ + 0.5 x3 <= x4 <= 5/7 x3 + 5/7 + x4.lb = 0.5 ( -1.5 x0 + 0.5 ) = -0.75 x0 + 0.25 : [-0.5, 1] + x4.ub = 5/7 ( -1.5 x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [1, 35/14 = 2.5] + x4 range: [-0.5, 2.5] + + Layer 5 (with residual from x0): + + x5 = 3x4 + x0 + 1 + x5.lb = 3 ( -0.75 x0 + 0.25 ) + ( x0 ) + 1 = -1.25x0 + 1.75 : [0.5, 3] + x5.ub = 3 ( -15/14 x0 + 20/14 ) + ( x0 ) + 1 = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5] + x5 range: [0.5, 7.5] + + Layer 6: + x6 = x5 + x6.lb = -1.25x0 + 1.75 : [0.5, 3] + x6.ub = -31/14 x0 + 74/14 : [43/14, 7.5] + x6 range: [0.5, 7.5] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, -0.5, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2.5, Tightening::UB ), + Tightening( 4, -0.5, Tightening::LB ), + Tightening( 4, 2.5, Tightening::UB ), + Tightening( 5, 0.5, Tightening::LB ), + Tightening( 5, 7.5, Tightening::UB ), + Tightening( 6, 0.5, Tightening::LB ), + Tightening( 6, 7.5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_deeppoly_relu_reindex() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyReluReindex( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) ); + + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 1] + + Layers 1, 2: + + x2 = x0 + x1 + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3 = x0 - x1 + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] + + Both ReLUs are undecided, bounds are concretized. Using custom ReLU lower + coefficient of 0.5. Upper coefficient: 2/( 2--2 ) = 2/4 = 0.5 + + 0.5 x2 <= x4 <= 0.5x2 + 1 + x4.lb = 0.5 ( x0 + x1 ) = 0.5x0 + 0.5x1 + x4.ub = 0.5 ( x0 + x1 ) + 1 = 0.5x0 + 0.5x1 + 1 + x4 range: [-1, 2] + + 0.5 x3 <= x5 <= 0.5x3 + 1 + x5.lb = 0.5 ( x0 - x1 ) = 0.5x0 - 0.5x1 + x5.ub = 0.5 ( x0 - x1 ) + 1 = 0.5x0 - 0.5x1 + 1 + x5 range: [-1, 2] + + Layers 3, 4: + + x6 = x4 + x5 + x6.lb = 1 ( 0.5x0 + 0.5x1 ) + 1 ( 0.5x0 - 0.5x1 ) = x0 : [-1, 1] + x6.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) + 1 ( 0.5x0 - 0.5x1 + 1 ) = x0 + 2 : [1, 3] + x6 range: [-1, 3] + + x7 = x4 - x5 + x7.lb = 1 ( 0.5x0 + 0.5x1 ) - 1 ( 0.5x0 - 0.5x1 + 1 ) = x1 - 1 : [-2, 0] + x7.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) - 1 ( 0.5x0 - 0.5x1 ) = x1 + 1 : [0, 2] + x7 range: [-2, 2] + + Both ReLUs are undecided, bounds are concretized. Using custom ReLU lower + coefficient of 0.5. 
+ Upper coefficient (first ReLU): 3/( 3--1 ) = 3/4 = 0.75 + Upper coefficient (second ReLU): 2/( 2--2 ) = 2/4 = 0.5 + + 0.5 x6 <= x8 <= 0.75 x6 + 0.75 + x8.lb = 0.5 ( x0 ) = 0.5 x0 + x8.ub = 0.75 ( x0 + 2 ) + 0.75 = 0.75 x0 + 2.25 + x8 range: [-0.5, 3] + + 0.5 x7 <= x9 <= 0.5 x7 + 1 + x9.lb = 0.5 ( x1 - 1 ) = 0.5 x1 - 0.5 + x9.ub = 0.5 ( x1 + 1 ) + 1 = 0.5x1 + 1.5 + x9 range: [-1, 2] + + Layer 5: + x10 = x8 + x9 + 1 + x10.lb = 1 ( 0.5 x6 ) + 1 ( 0.5 x7 ) + 1 = ( 0.5 x4 + 0.5x5 ) + 1 ( 0.5 x4 - 0.5x5 ) + 1 + = x4 + 1 >= 0.5 x2 + 1 = 0.5 x0 + 0.5x1 + 1 : [0, 2] + x10.ub = 1 ( 0.75 x6 + 0.75 ) + 1 ( 0.5 x7 + 1 ) + 1 + = ( 0.75 x4 + 0.75 x5 + 0.75 ) + 1 ( 0.5 x4 - 0.5x5 + 1 ) + 1 + = 1.25 x4 + 0.25 x5 + 2.75 <= 0.625 x4 + 0.125 x5 + 4.25 + = 0.75 x0 + 0.5 x1 + 4.25 : [2.5, 5.5] + x10 range: [0, 5.5] + + x11 = x9 + x11.lb = 0.5 x1 - 0.5 : [-1, 0] + x11.ub = 0.5x1 + 1.5 : [1, 2] + x11 range: [-1, 2] + + */ + + List expectedBounds( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 5.5, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 2, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_deeppoly_abs_all_positive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + 
tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Both absolute values positive, bound survive through activations: + + x2 <= x4 <= x2 + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + x5 = x4 - x5 + => x2 - x3 <= x5 <= x2 - x3 + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ + + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_deeppoly_abs_positive_and_negative() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 
); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + x2 = 2x0 + 3x1 - 30 + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is negative, bounds get flipped + Second absolute value is positive, bounds surive the activation + + -x2 <= x4 <= -x2 + x4.lb = -2x0 -3x1 + 30 : [3, 19] + x4.ub = -2x0 -3x1 + 30 : [3, 19] + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + x5 = x4 - x5 + => -x2 - x3 <= x5 <= -x2 - x3 + x6.lb = - 3x0 - 4x1 + 30 : [-8, 14] + x6.ub = - 3x0 - 4x1 + 30 : [-8, 14] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 19, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 14, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_deeppoly_absolute_values_positive_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + 
tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + x2 = 2x0 + 3x1 - 15 + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is undecided, bounds are concretized. + Second absolute value is active, bounds surive the activation + + 0 <= x4 <= 12 + x4 range: [0, 12] + x4.lb = 0 + x4.ub = 12 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + => -x3 <= x6 <= -x3 + 12 + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 + 12 : [ 1, 7] + + x6 range: [-11, 7] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, 7, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_deeppoly_absolute_values_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 
1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. + nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = -3 + x2 is eliminated, everything set to -3 + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is negative, bounds get flipped + Second absolute value is positive, bounds surive the activation + + -x2 <= x4 <= -x2 + x4: all set to 3 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + => -x2 - x3 <= x6 <= -x2 - x3 + => -x3 + 3 <= x6 <= -x3 + 3 + x6.lb = - x0 - x1 + 3 : [-8, -2] + x6.ub = - x0 - x1 + 3 : [-8, -2] + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_deeppoly_signs_positive_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + 
tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 - 15 + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First sign is undecided, bounds are concretized. + Second sign is active, bounds become constant 1 + Using custom coefficients with alpha = { 0.5, 0.5 }. + Coefficient (first Sign, lower): 2/12 * 0.5 = 1/12. + Coefficient (first Sign, upper): -2/-4 * 0.5 = 1/4. + + 1/12 x2 - 1 <= x4 <= 1/4 x2 + 1 + x4.lb = 1/12 ( 2x0 + 3x1 - 15 ) - 1 = 2/12 x0 + 3/12 x1 - 27/12 + x4.ub = 1/4 ( 2x0 + 3x1 - 15 ) + 1 = 0.5 x0 + 0.75x1 - 2.75 + x4 range: [-1, 1] + + 1 <= x5 <= 1 + x5.lb = 1 + x5.ub = 1 + x5 range: [1, 1] + + Layer 3: + + x6 = x4 - x5 : [-2, 0] + => 1/12 x2 - 2 <= x6 <= 1/4 x2 : [-8/3, 6] + x6.lb = 1 ( 2/12 x0 + 3/12 x1 - 27/12 ) - 1 ( 1 ) = 2/12 x0 + 3/12 x1 - 39/12 : + [-28/12 = -7/3, -1] + x6.ub = 1 ( 0.5 x0 + 0.75x1 - 2.75 ) - 1 ( 1 ) = 0.5 x0 + 0.75x1 - 3.75 : [-1, 3] + + x6 range: [-2, 0] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), + Tightening( 6, 0, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_deeppoly_signs_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + 
tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. + nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = -3 + x2 is eliminated, everything set to -3 + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First sign is negative, bounds become constant -1 + Second sign is positive, bounds become constant 1 + + -1 <= x4 <= -1 + x4: all set to -1 + + 1 <= x5 <= 1 + x5: all set to 1 + + Layer 3: + + x6 = x4 - x5 + x6.lb = 1 ( -1 ) - 1 ( 1 ) = -2 + x6.ub = 1 ( -1 ) - 1 ( 1 ) = -2 + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, -1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_deeppoly_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyLeakyReLU( nlr, tableau ); // alpha = 0.2 + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) ); + + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 1] + + Layer 1: + x2 = x0 + x1 + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3 = x0 - x1 + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] + + Both LeakyReLUs are undecided, bounds are concretized. Using custom lower coefficient with + alpha = { 0.5 }.
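+        (Reading aid: the general form these numbers instantiate, assuming the standard
+        parameterised LeakyReLU relaxation — for slope a and source bounds [l, u] with l < 0 < u,
+        the lower relaxation uses slope ( 1 - a ) * alpha + a with bias 0, and the upper
+        relaxation uses slope ( u - a*l )/( u - l ) with bias ( a - 1 ) * u * l/( u - l ).
+        Below, a = 0.2 and alpha = 0.5.)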
+ Lower Coefficient: ( 1 - 0.2 ) * 0.5 + 0.2 = 0.6 + Lower Bias: 0 + Upper Coefficient: ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6 + Upper Bias: ( 0.2 - 1 ) * 2 * -2 /( 2--2 ) = 0.8 + + 0.6 x2 <= x4 <= 0.6 x2 + 0.8 + x4.lb = 0.6 ( x0 + x1 ) = 0.6 x0 + 0.6x1 + x4.ub = 0.6 ( x0 + x1 ) + 0.8 = 0.6 x0 + 0.6 x1 + 0.8 + x4 range: [-1.2, 2] + + 0.6 x3 <= x5 <= 0.6 x3 + 0.8 + x5.lb = 0.6 ( x0 - x1 ) = 0.6 x0 - 0.6 x1 + x5.ub = 0.6 ( x0 - x1 ) + 0.8 = 0.6 x0 - 0.6 x1 + 0.8 + x5 range: [-1.2, 2] + + Layer 2: + + x6 = x4 + x5 + x6.lb = 1 ( 0.6x0 + 0.6x1 ) + 1 ( 0.6x0 - 0.6x1 ) = 1.2 x0 : [-1.2, 1.2] + x6.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) + 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2 x0 + 1.6 : + [0.4, 2.8] x6 range: [-1.2, 2.8] + + x7 = x4 - x5 + x7.lb = 1 ( 0.6x0 + 0.6x1 ) - 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2 x1 - 0.8 : [-2, 0.4] + x7.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) - 1 ( 0.6x0 - 0.6x1 ) = 1.2 x1 + 0.8 : [-0.4, 2] + x7 range: [-2, 2] + + Both LeakyReLUs are undecided, bounds are concretized. Using custom lower coefficient with + alpha = { 0.5 }. + Lower Coefficient (first LeakyReLU): ( 1 - 0.2 ) * 0.5 + 0.2 = 0.6 + Lower Bias (first LeakyReLU): 0 + Upper Coefficient (first LeakyReLU): ( 2.8 - 0.2*-1.2 )/( 2.8--1.2 ) = 3.04/4 = 0.76 + Upper Bias (first LeakyReLU): ( 0.2 - 1 ) * 2.8 * -1.2 / ( 2.8--1.2 ) = 0.672 + + Lower Coefficient (second LeakyReLU): ( 1 - 0.2 ) * 0.5 + 0.2 = 0.6 + Lower Bias (second LeakyReLU): 0 + Upper Coefficient (second LeakyReLU): ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6 + Upper Bias (second LeakyReLU): ( 0.2 - 1 ) * 2 * -2 / ( 2--2 ) = 0.8 + + 0.6 x6 <= x8 <= 0.76 x6 + 0.672 + x8.lb = 0.6 ( 1.2x0 ) = 0.72 x0 + x8.ub = 0.76 ( 1.2x0 + 1.6 ) + 0.672 = 0.912 x0 + 1.888 + x8 range: [-0.72, 2.8] + + 0.6 x7 <= x9 <= 0.6 x7 + 0.8 + x9.lb = 0.6 ( 1.2x1 - 0.8 ) = 0.72 x1 - 0.48 + x9.ub = 0.6 ( 1.2x1 + 0.8 ) + 0.8 = 0.72 x1 + 1.28 + x9 range: [-1.2, 2] + + Layer 3: + + x10 = x8 + x9 + 1 + x10.lb = 0.6 x6 + 0.6 x7 + 1 >= 0.6 ( x4 + x5 ) + 0.6 ( x4 - x5 ) + 1 = + 1.2 x4 + 1 >= 1.2 ( 0.6 x2 ) + 1 = 0.72 x2 + 1 + = 0.72 x0 + 0.72 x1 + 1 : [-0.44, 2.44] + x10.ub = ( 0.76 x6 + 0.672 ) + ( 0.6 x7 + 0.8 ) + 1 = 0.76 x6 + 0.6 x7 + 2.472 + <= 0.76 ( x4 + x5 ) + 0.6 ( x4 - x5 ) + 2.472 = 1.36 x4 + 0.16 x5 + 2.472 + <= 1.36 ( 0.6 x2 + 0.8 ) + 0.16 ( 0.6 x3 + 0.8 ) + 2.472 + = 0.816 x2 + 0.096 x3 + 3.688 = 0.912 x0 + 0.72 x1 + 3.688 : [2.056, 5.32] + x10 range: [-0.44, 5.32] + + x11.lb = 0.72 x1 - 0.48 : [-1.2, 0.24] + x11.ub = 0.72 x1 + 1.28 : [0.56, 2] + x11 range: [-1.2, 2] + + */ + + List expectedBounds( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1.2, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1.2, Tightening::LB ), Tightening( 6, 2.8, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -0.72, Tightening::LB ), Tightening( 8, 2.8, Tightening::UB ), + Tightening( 9, -1.2, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -0.44, Tightening::LB ), Tightening( 10, 5.32, Tightening::UB ), + Tightening( 11, -1.2, Tightening::LB ), Tightening( 11, 2, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_deeppoly_sigmoids_and_round() + { + Options::get()->setString( 
Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolySigmoidsAndRound( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + // Layer 1 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 0 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 0 ), 2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 1 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 1 ), 2, 0.00001 ) ); + + // Layer 2 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 0 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 0 ), 0.8807, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 1 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 1 ), 0.8807, 0.0001 ) ); + + // Layer 3 + /* + Double-check with Python + --- + from math import exp as e + def g(x): + return 1 / (1 + e(-x)) + + def g_prime(x): + return g(x) * (1 - g(x)) + + def lam(l, u): + return (g(u) - g(l)) / (u - l) + + def lam_prime(l, u): + return min(g_prime(l), g_prime(u)) + + l3 = l4 = -2 + u3 = u4 = 2 + l5 = l6 = g(-2) + u5 = u6 = g(2) + lambda7 = lam(l3, u3) + lambda7_prime = lam_prime(l3, u3) + lambda8 = lam(l4, u4) + lambda8_prime = lam_prime(l4, u4) + x7_l = lambda7_prime * (-2) + g(-2) + g(-2) - lambda7_prime * (-2 + -2) + x7_u = lambda7_prime * (2) + g(2) + g(2) -lambda7_prime * (2 + 2) + x8_l = lambda8_prime * (-2) + g(-2) - g(2) - lambda8_prime * (-2 - 2) + x8_u = lambda8_prime * (2) + g(2) - g(-2) -lambda8_prime * (2 - -2) + print(x7_l) + print(x7_u) + print(x8_l) + print(x8_u) + + ''' + Sigmoid linear relaxation ( Layer 2 ): + x4 >= lambda7_prime * x2 + ( g(l3) - lambda7_prime * l3 ) + x4 <= lambda7_prime * x2 + ( g(u3) - lambda7_prime * u3 ) + x5 >= lambda8_prime * x3 + ( g(l4) - lambda8_prime * l4 ) + x5 <= lambda8_prime * x3 + ( g(u4) - lambda7_prime * u4 ) + ''' + print('------------------') + print(lambda7_prime) + print(lambda8_prime) + print(g(l3) - lambda7_prime * l3) + print(g(u3) - lambda7_prime * u3) + print(g(l4) - lambda8_prime * l4) + print(g(u4) - lambda8_prime * u4) + + --- + [output]: + 0.4483930148512481 + 1.5516069851487517 + -0.5516069851487517 + 0.5516069851487517 + ------------------ + 0.1049935854035065 + 0.1049935854035065 + 0.3291900928291306 + 0.6708099071708693 + 0.3291900928291306 + 0.6708099071708693 + */ + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 0 ), 0.4483, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 0 ), 1.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 1 ), -0.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 1 ), 0.5516, 0.0001 ) ); + + // Layer 4 + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 0 ), 0 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 0 ), 2 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 1 ), -1 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 1 ), 1 ); + } + + 
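+    /*
+      Sanity-check sketch for the Round bounds asserted above (not part of the test; assumes
+      Layer 4 simply rounds the Layer 3 bounds to the nearest integer):
+
+        # Python
+        layer3_bounds = [0.4484, 1.5516, -0.5516, 0.5516]
+        print( [round( v ) for v in layer3_bounds] )   # [0, 2, -1, 1]
+
+      matching the asserted Layer 4 values lb0 = 0, ub0 = 2, lb1 = -1, ub1 = 1.
+    */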
void test_parameterised_deeppoly_max_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyMax( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) ); + + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 2] + + Layers 1, 2, 3: + + x2 = x0 + x1 + x2.lb = x0 + x1 : [-2, 3] + x2.ub = x0 + x1 : [-2, 3] + + x3 = x0 - x1 + x3.lb = x0 - x1 : [-3, 2] + x3.ub = x0 - x1 : [-3, 2] + + Both ReLUs are undecided, bounds are concretized. Using custom ReLU lower + coefficient of 0.5.\ + Upper coefficient (first ReLU): 3/( 3--2 ) = 3/5 = 0.6. + Upper coefficient (second ReLU): 2/( 2--3 ) = 2/5 = 0.4 + + 0.5 x2 <= x4 <= 0.6 x2 + 1.2 + x4.lb = 0.5 ( x0 + x1 ) = 0.5 x0 + 0.5 x1 + x4.ub = 0.6 ( x0 + x1 ) + 1.2 = 0.6x0 + 0.6x1 + 1.2 + x4 range: [-1, 3] + + 0.5 x3 <= x5 <= 0.4 x3 + 1.2 + x5.lb = 0.5 ( x0 - x1 ) = 0.5 x0 - 0.5 x1 + x5.ub = 0.4 ( x0 - x1 ) + 1.2 = 0.4x0 + 0.4x1 + 1.2 + x5 range: [-1.5, 2] + + Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub + Max inherits lower bound from x4, and its upper bound is constant 3. + + x4 <= x6 <= 3 + x6.lb = 0.5 x0 + 0.5 x1 : [-1, 1.5] + x6.ub = 3 : [3, 3] + x6 range: [-1, 3] + + Layer 4: + + x7 = 2x6 + => 2x4 <= x7 <= 6 + x7.lb = 2 ( 0.5 x0 + 0.5 x1 ) = x0 + x1 : [-2, 3] + x7.ub = 2 ( 3 ) = 6 : [6, 6] + x7 range: [-2, 6] + */ + + List expectedBounds( { + Tightening( 2, -2, Tightening::LB ), + Tightening( 2, 3, Tightening::UB ), + Tightening( 3, -3, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -1.5, Tightening::LB ), + Tightening( 5, 2, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), + Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), + Tightening( 7, 6, Tightening::UB ), + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_deeppoly_max_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyMax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -3 ); + tableau.setUpperBound( 1, -2 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) ); + + /* + Input ranges: + + x0: [1, 2] + x1: [-3, -2] + + Layer 1: + + x2 = x0 + x1 + x2.lb = x0 + x1 : [-2, 0] + x2.ub = x0 + x1 : [-2, 0] + + x3 = x0 - x1 + x3.lb = x0 - x1 : [3, 5] + x3.ub = x0 - x1 : [3, 5] + + First ReLU is negative, bounds become constant 0 + Second ReLU is positive, bounds survive the activation + + 0 <= x4 <= 0 + x4: all set to 0 + + x3 <= x5 <= x3 + x5.lb = x0 - x1 : [3, 5] + x5.ub = x0 - x1 : [3, 5] + + Max is fixed because x5.lb > 
x4.ub, it inherits x5's bounds + + x5 <= x6 <= x5 + => x3 <= x6 <= x5 + x6.lb = x0 - x1 : [3, 5] + x6.ub = x0 - x1 : [3, 5] + + Layer 3: + + x7 = 2x6 + => x7 = 2x5 = 2x3 = 2x0 - 2x1 + x7.lb = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] + x7.ub = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] + */ + + List expectedBounds( { + Tightening( 2, -2, Tightening::LB ), + Tightening( 2, 0, Tightening::UB ), + Tightening( 3, 3, Tightening::LB ), + Tightening( 3, 5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 3, Tightening::LB ), + Tightening( 5, 5, Tightening::UB ), + Tightening( 6, 3, Tightening::LB ), + Tightening( 6, 5, Tightening::UB ), + Tightening( 7, 6, Tightening::LB ), + Tightening( 7, 10, Tightening::UB ), + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_deeppoly_softmax1() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolySoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) ); + } + + void test_parameterised_deeppoly_softmax2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + { + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolySoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + + Layer 1: + + x3 = x0 - x1 + x2 + 1 + x3.lb = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3.ub = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3 range: [ 1.999999, 2.000002 ] + + x4 = -x0 + x1 + x2 + 2 + x4.lb = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4.ub = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4 range: [ 2.999999, 3.000002 ] + + x5 = -x0 - x1 - x2 + 3 + x5.lb = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5.ub = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5 range: [ -0.000003, 0 ] + */ + + unsigned size = nlr.getLayer( 2 )->getSize(); + Vector sourceLbs = { 1.999899, 2.999899, -0.000003 }; + Vector sourceUbs = { 2.000102, 3.000102, 0.0001 }; + Vector sourceMids = { 2.0000005, 3.0000005, -0.0000015 }; + Vector targetLbs( size, 0 ); + Vector targetUbs( size, 0 ); + Vector symbolicLb( size * size, 0 ); + Vector symbolicUb( size * size, 0 ); + Vector symbolicLowerBias( size, 0 ); + Vector symbolicUpperBias( size, 0 ); + for ( unsigned i = 0; i < size; ++i ) + { + targetLbs[i] = 
NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i ); + targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i ); + } + for ( unsigned i = 0; i < size; ++i ) + { + symbolicLowerBias[i] = + NLR::Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, i ); // Using lse2 + symbolicUpperBias[i] = + NLR::Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, i ); + for ( unsigned j = 0; j < size; ++j ) + { + symbolicLb[size * j + i] = + NLR::Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, i, j ); + symbolicUb[size * j + i] = + NLR::Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, i, j ); + symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j]; + symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j]; + } + } + TS_ASSERT( compareVectors( targetLbs, Vector( { 0.2595, 0.7054, 0.0351 } ) ) ); + TS_ASSERT( compareVectors( targetUbs, Vector( { 0.2595, 0.7054, 0.0351 } ) ) ); + TS_ASSERT( compareVectors( symbolicLb, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ) ) ); + TS_ASSERT( compareVectors( symbolicUb, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ) ) ); + TS_ASSERT( + compareVectors( symbolicLowerBias, Vector( { 0.4243, 0.4481, 0.1277 } ) ) ); + TS_ASSERT( + compareVectors( symbolicUpperBias, Vector( { 0.4243, 0.4480, 0.1277 } ) ) ); + + /* + Layer 2: + +0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 + x6.lb = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 + x6.ub = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 + x6 range: [ 0.2595, 0.2595 ] + +-0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4480 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 + x7.lb = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6062 + x7.ub = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6063 + x7 range: [ 0.7054, 0.7054 ] + +-0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= 0.1922 x3 -0.0248 x4 + 0.0339 x5 + 0.1277 + x8.lb = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707 + x8.ub = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707 + x8 range: [ 0.0351, 0.0351 ] + + Layer 3: + + x9 = x6 + x7 + x8 + => x9 = ( 0.1922 - 0.1830 - 0.0091 ) x3 + ( -0.1830 + 0.2078 - 0.0248 ) x4 + ( + -0.0091 - 0.0248 + 0.0339 ) x5 + ( 0.4243 + 0.4481 + 0.1277 ) + + => x9 = 0.0001 x3 + 0 x4 + 0 x5 + 1.0001 + => ( Up to rounding ) 1 <= x9 <= 1. + x9.lb = 1 + x9.ub = 1 + x9 range: [ 1, 1 ] + + x10 = - x6 - x7 - x8 + => x10 = - ( 0.1922 - 0.1830 - 0.0091 ) x3 - ( -0.1830 + 0.2078 - 0.0248 ) x4 - ( + -0.0091 - 0.0248 + 0.0339 ) x5 - ( 0.4243 + 0.4481 + 0.1277 ) + + => x10 = - 0.0001 x3 - 0.0000 x4 - 0.0000 x5 - 1.0001 + => ( Up to rounding ) 1 <= x10 <= 1. 
+ x10.lb = 1 + x10.ub = 1 + x10 range: [ -1, -1 ] + */ + + List expectedBounds( { Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, 0.2595, Tightening::LB ), + Tightening( 6, 0.2595, Tightening::UB ), + Tightening( 7, 0.7054, Tightening::LB ), + Tightening( 7, 0.7054, Tightening::UB ), + Tightening( 8, 0.0351, Tightening::LB ), + Tightening( 8, 0.0351, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), + Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), + Tightening( 10, -1, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + { + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "er" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolySoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + + Layer 1: + + x3 = x0 - x1 + x2 + 1 + x3.lb = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3.ub = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3 range: [ 1.999999, 2.000002 ] + + x4 = -x0 + x1 + x2 + 2 + x4.lb = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4.ub = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4 range: [ 2.999999, 3.000002 ] + + x5 = -x0 - x1 - x2 + 3 + x5.lb = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5.ub = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5 range: [ -0.000003, 0 ] + */ + + unsigned size = nlr.getLayer( 2 )->getSize(); + Vector sourceLbs = { 1.999899, 2.999899, -0.000003 }; + Vector sourceUbs = { 2.000102, 3.000102, 0.0001 }; + Vector sourceMids = { 2.0000005, 3.0000005, -0.0000015 }; + Vector targetLbs( size, 0 ); + Vector targetUbs( size, 0 ); + Vector symbolicLb( size * size, 0 ); + Vector symbolicUb( size * size, 0 ); + Vector symbolicLowerBias( size, 0 ); + Vector symbolicUpperBias( size, 0 ); + for ( unsigned i = 0; i < size; ++i ) + { + targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i ); + targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i ); + } + for ( unsigned i = 0; i < size; ++i ) + { + symbolicLowerBias[i] = + NLR::Layer::ERLowerBound( sourceMids, sourceLbs, sourceUbs, i ); // Using er + symbolicUpperBias[i] = + NLR::Layer::ERUpperBound( sourceMids, targetLbs, targetUbs, i ); + for ( unsigned j = 0; j < size; ++j ) + { + symbolicLb[size * j + i] = + NLR::Layer::dERLowerBound( sourceMids, sourceLbs, sourceUbs, i, j ); + symbolicUb[size * j + i] = + NLR::Layer::dERUpperBound( sourceMids, targetLbs, targetUbs, i, j ); + symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j]; + symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j]; + } + } + TS_ASSERT( compareVectors( targetLbs, Vector( { 0.2595, 0.7054, 0.0351 } ) ) ); + TS_ASSERT( compareVectors( targetUbs, Vector( { 0.2595, 0.7054, 
0.0351 } ) ) ); + TS_ASSERT( compareVectors( symbolicLb, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ) ) ); + TS_ASSERT( compareVectors( symbolicUb, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ) ) ); + TS_ASSERT( + compareVectors( symbolicLowerBias, Vector( { 0.4243, 0.4481, 0.1277 } ) ) ); + TS_ASSERT( + compareVectors( symbolicUpperBias, Vector( { 0.4243, 0.4480, 0.1277 } ) ) ); + + /* + Layer 2: + +0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 + x6.lb = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 + x6.ub = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 + x6 range: [ 0.2595, 0.2595 ] + +-0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4480 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 + x7.lb = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6062 + x7.ub = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6063 + x7 range: [ 0.7054, 0.7054 ] + +-0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= 0.1922 x3 -0.0248 x4 + 0.0339 x5 + 0.1277 + x8.lb = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707 + x8.ub = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707 + x8 range: [ 0.0351, 0.0351 ] + + Layer 3: + + x9 = x6 + x7 + x8 + => x9 = ( 0.1922 - 0.1830 - 0.0091 ) x3 + ( -0.1830 + 0.2078 - 0.0248 ) x4 + ( + -0.0091 - 0.0248 + 0.0339 ) x5 + ( 0.4243 + 0.4481 + 0.1277 ) + + => x9 = 0.0001 x3 + 0 x4 + 0 x5 + 1.0001 + => ( Up to rounding ) 1 <= x9 <= 1. + x9.lb = 1 + x9.ub = 1 + x9 range: [ 1, 1 ] + + x10 = - x6 - x7 - x8 + => x10 = - ( 0.1922 - 0.1830 - 0.0091 ) x3 - ( -0.1830 + 0.2078 - 0.0248 ) x4 - ( + -0.0091 - 0.0248 + 0.0339 ) x5 - ( 0.4243 + 0.4481 + 0.1277 ) + + => x10 = - 0.0001 x3 - 0.0000 x4 - 0.0000 x5 - 1.0001 + => ( Up to rounding ) 1 <= x10 <= 1. 
+ x10.lb = 1 + x10.ub = 1 + x10 range: [ -1, -1 ] + */ + List expectedBounds( { Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, 0.2595, Tightening::LB ), + Tightening( 6, 0.2595, Tightening::UB ), + Tightening( 7, 0.7054, Tightening::LB ), + Tightening( 7, 0.7054, Tightening::UB ), + Tightening( 8, 0.0351, Tightening::LB ), + Tightening( 8, 0.0351, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), + Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), + Tightening( 10, -1, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + } + + void test_parameterised_deeppoly_softmax3() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolySoftmax2( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.00001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.00001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.00001 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + + Layer 1: + + x3 = x0 - x1 + x2 + 1 + x3.lb = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3.ub = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3 range: [ 1.999999, 2.000002 ] + + x4 = -x0 + x1 + x2 + 2 + x4.lb = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4.ub = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4 range: [ 2.999999, 3.000002 ] + + x5 = -x0 - x1 - x2 + 3 + x5.lb = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5.ub = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5 range: [ -0.000003, 0 ] + + x6 = -x0 - x1 - x2 + 2 + x6.lb = -x0 - x1 - x2 + 2 : [ -1.000003, -1 ] + x6.ub = -x0 - x1 - x2 + 2 : [ -1.000003, -1 ] + x6 range: [ -1.000003, -1 ] + + x7 = -x0 - x1 - x2 + 1 + x7.lb = -x0 - x1 - x2 + 1 : [ -2.000003, -2 ] + x7.ub = -x0 - x1 - x2 + 1 : [ -2.000003, -2 ] + x7 range: [ -2.000003, -2 ] + */ + + // First Sigmoid: x8 x10 x12 = softmax( x3, x5, x7 ). 
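+        // Quick cross-check (sketch, not part of the test): at the midpoint inputs
+        // ( x3, x5, x7 ) ~ ( 2, 0, -2 ), e.g. in Python:
+        //   import math
+        //   e = [ math.exp( v ) for v in ( 2, 0, -2 ) ]
+        //   print( [ v / sum( e ) for v in e ] )   # ~ [0.8668, 0.1173, 0.0159]
+        // which matches the concrete target bounds asserted below.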
+ unsigned size = nlr.getLayer( 2 )->getActivationSources( 0 ).size(); + Vector sourceLbs = { 1.999899, -0.000003, -2.000103 }; + Vector sourceUbs = { 2.000102, 0.0001, -1.999 }; + Vector sourceMids = { 2.0000005, -0.0000015, -2.0000015 }; + Vector targetLbs( size, 0 ); + Vector targetUbs( size, 0 ); + Vector symbolicLb( size * size, 0 ); + Vector symbolicUb( size * size, 0 ); + Vector symbolicLowerBias( size, 0 ); + Vector symbolicUpperBias( size, 0 ); + for ( unsigned i = 0; i < size; ++i ) + { + targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i ); + targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i ); + } + for ( unsigned i = 0; i < size; ++i ) + { + symbolicLowerBias[i] = + NLR::Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, i ); // Using lse2 + symbolicUpperBias[i] = NLR::Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, i ); + for ( unsigned j = 0; j < size; ++j ) + { + symbolicLb[size * j + i] = + NLR::Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, i, j ); + symbolicUb[size * j + i] = + NLR::Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, i, j ); + symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j]; + symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j]; + } + } + TS_ASSERT( compareVectors( targetLbs, Vector( { 0.8668, 0.1173, 0.0159 } ) ) ); + TS_ASSERT( compareVectors( targetUbs, Vector( { 0.8668, 0.1173, 0.0159 } ) ) ); + TS_ASSERT( compareVectors( symbolicLb, + Vector( { 0.1155, + -0.1017, + -0.0138, + -0.1017, + 0.1035, + -0.0019, + -0.0138, + -0.0019, + 0.0156 } ) ) ); + TS_ASSERT( compareVectors( symbolicUb, + Vector( { 0.1154, + -0.1017, + -0.0138, + -0.1017, + 0.1036, + -0.0019, + -0.0138, + -0.0019, + 0.0156 } ) ) ); + TS_ASSERT( + compareVectors( symbolicLowerBias, Vector( { 0.6084, 0.3170, 0.0747 } ) ) ); + TS_ASSERT( + compareVectors( symbolicUpperBias, Vector( { 0.6084, 0.3170, 0.0747 } ) ) ); + + // Second Sigmoid: x9 x11 = softmax( x4, x6 ). 
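+        // Quick cross-check (sketch, not part of the test): at the midpoint inputs
+        // ( x4, x6 ) ~ ( 3, -1 ), e.g. in Python:
+        //   import math
+        //   e = [ math.exp( v ) for v in ( 3, -1 ) ]
+        //   print( [ v / sum( e ) for v in e ] )   # ~ [0.9820, 0.0180]
+        // which matches the concrete target bounds asserted below.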
+ size = nlr.getLayer( 2 )->getActivationSources( 1 ).size(); + sourceLbs = Vector( { 2.999899, -1.000103 } ); + sourceUbs = Vector( { 3.000102, -0.9999 } ); + sourceMids = Vector( { 3.0000005, -1.0000015 } ); + targetLbs = Vector( size, 0 ); + targetUbs = Vector( size, 0 ); + symbolicLb = Vector( size * size, 0 ); + symbolicUb = Vector( size * size, 0 ); + symbolicLowerBias = Vector( size, 0 ); + symbolicUpperBias = Vector( size, 0 ); + for ( unsigned i = 0; i < size; ++i ) + { + targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i ); + targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i ); + } + for ( unsigned i = 0; i < size; ++i ) + { + symbolicLowerBias[i] = + NLR::Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, i ); // Using lse2 + symbolicUpperBias[i] = NLR::Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, i ); + for ( unsigned j = 0; j < size; ++j ) + { + symbolicLb[size * j + i] = + NLR::Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, i, j ); + symbolicUb[size * j + i] = + NLR::Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, i, j ); + symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j]; + symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j]; + } + } + TS_ASSERT( compareVectors( targetLbs, Vector( { 0.9820, 0.0180 } ) ) ); + TS_ASSERT( compareVectors( targetUbs, Vector( { 0.9820, 0.0180 } ) ) ); + TS_ASSERT( + compareVectors( symbolicLb, Vector( { 0.0177, -0.0177, -0.0177, 0.0177 } ) ) ); + TS_ASSERT( + compareVectors( symbolicUb, Vector( { 0.0177, -0.0177, -0.0177, 0.0177 } ) ) ); + TS_ASSERT( compareVectors( symbolicLowerBias, Vector( { 0.9114, 0.0886 } ) ) ); + TS_ASSERT( compareVectors( symbolicUpperBias, Vector( { 0.9114, 0.0886 } ) ) ); + + /* + Layer 2: + + First Sigmoid: x8 x10 x12 = softmax( x3, x5, x7 ). +0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1154 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 + x8.lb = 0.2310 x0 + 0.0001 x1 + 0.2310 x2 + 0.4051 + x8.ub = 0.2310 x0 + 0.0000 x1 + 0.2310 x2 + 0.4050 + x8 range: [ 0.8668, 0.8668 ] + +-0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + 0.3170 + x10.lb = -0.2033 x0 + 0.0001 x1 - 0.2033 x2 + 0.5239 + x10.ub = -0.2033 x0 + 0.0000 x1 - 0.2033 x2 + 0.5241 + x10 range: [ 0.1173, 0.1173 ] + +-0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 + x12.lb = -0.0275 x0 + 0.0001 x1 - 0.0275 x2 + 0.0708 + x12.ub = -0.0275 x0 + 0.0001 x1 - 0.0275 x2 + 0.0708 + x12 range: [ 0.0159, 0.0159 ] + + Second Sigmoid: x9 x11 = softmax( x4, x6 ). +0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114 + x9.lb = 0 x0 + 0.0354 x1 + 0.0354 x2 + 0.9114 + x9.ub = 0 x0 + 0.0354 x1 + 0.0354 x2 + 0.9114 + x9 range: [ 0.9820, 0.0180 ] + +-0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886 + x11.lb = 0 x0 - 0.0354 x1 - 0.0354 x2 + 0.0886 + x11.ub = 0 x0 - 0.0354 x1 - 0.0354 x2 + 0.0886 + x11 range: [ 0.9820, 0.0180 ] + + Layer 3: + + x13 = x8 + x10 + x12 + => x13 = ( 0.1155 - 0.1017 - 0.0138 ) x3 + ( -0.1017 + 0.1035 - 0.0019 ) x5 + + ( -0.0138 - 0.0019 + 0.0156 ) x7 + ( 0.6084 + 0.3170 + 0.0747 ) + + => x13 = 0 x3 - 0.0001 x5 - 0.0001 x7 + 1.0001 + => ( Up to rounding ) 1 <= x13 <= 1. 
+ x13.lb = 1 + x13.ub = 1 + x13 range: [ 1, 1 ] + + x14 = - x8 - x10 - x12 + => x14 = - ( 0.1155 - 0.1017 - 0.0138 ) x3 - ( -0.1017 + 0.1035 - 0.0019 ) x5 + - ( -0.0138 - 0.0019 + 0.0156 ) x7 - ( 0.6084 + 0.3170 + 0.0747 ) + + => x14 = 0 x3 + 0.0001 x5 + 0.0001 x7 - 1.0001 + => ( Up to rounding ) -1 <= x14 <= -1. + x14.lb = -1 + x14.ub = -1 + x14 range: [ -1, -1 ] + + x15 = x9 + x11 + => x15 = ( 0.0177 - 0.0177 ) x4 + ( -0.0177 + 0.0177 ) x6 + ( 0.9114 + 0.0886 ) + + => x15 = 0 x4 + 0 x6 + 1 + => ( Up to rounding ) 1 <= x15 <= 1. + x15.lb = 1 + x15.ub = 1 + x15 range: [ 1, 1 ] + + x16 = - x9 - x11 + => x16 = - ( 0.0177 - 0.0177 ) x4 - ( -0.0177 + 0.0177 ) x6 - ( 0.9114 + 0.0886 ) + + => x16 = 0 x4 + 0 x6 - 1 + => ( Up to rounding ) -1 <= x16 <= -1. + x16.lb = -1 + x16.ub = -1 + x16 range: [ -1, -1 ] + */ + + List expectedBounds( { + Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ), + Tightening( 8, 0.86681, Tightening::LB ), Tightening( 8, 0.86682, Tightening::UB ), + Tightening( 9, 0.98201, Tightening::LB ), Tightening( 9, 0.98201, Tightening::UB ), + Tightening( 10, 0.11731, Tightening::LB ), Tightening( 10, 0.11731, Tightening::UB ), + Tightening( 11, 0.017985, Tightening::LB ), Tightening( 11, 0.017986, Tightening::UB ), + Tightening( 12, 0.015875, Tightening::LB ), Tightening( 12, 0.015876, Tightening::UB ), + Tightening( 13, 1, Tightening::LB ), Tightening( 13, 1, Tightening::UB ), + Tightening( 14, -1, Tightening::LB ), Tightening( 14, -1, Tightening::UB ), + Tightening( 15, 1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, -1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_deeppoly_bilinear() + { + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyBilinear( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -2 ); + tableau.setUpperBound( 1, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) ); + + /* + Input ranges: + + x0: [1, 2] + x1: [-2, 1] + + Layers 1, 2: + + x2 = x0 - 2x1 + x2.lb = x0 - 2x1 : [-1, 6] + x2.ub = x0 - 2x1 : [-1, 6] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [-1, 3] + x3.ub = x0 + x1 : [-1, 3] + + Using custom coefficients with alpha = { 0.5, 0.5 }. + Coefficients for bilinear layer: + Lower bound: + alpha_l = 0.5 x3.lb + ( 1 - 0.5 ) x3.ub = 0.5 * -1 + 0.5 * 3 = 1 + beta_l = 0.5 x2.lb + ( 1 - 0.5 ) x2.ub = 0.5 * -1 + 0.5 * 6 = 2.5 + gamma_l = -0.5 x2.lb x3.lb - ( 1 - 0.5 ) x2.ub x3.ub = -0.5 * -1 * -1 - 0.5 * 6 * 3 = + -9.5. + + Upper bound: + alpha_l = 0.5 x3.ub + ( 1 - 0.5 ) x3.lb = 0.5 * -1 + 0.5 * 3 = 1 + beta_l = 0.5 x2.lb + ( 1 - 0.5 ) x2.ub = 0.5 * -1 + 0.5 * 6 = 2.5 + gamma_l = -0.5 x2.lb x3.ub - ( 1 - 0.5 ) x2.ub x3.lb = -0.5 * -1 * 6 - 0.5 * -1 * 3 + = 4.5. 
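+        (Reading aid: the general form these coefficients instantiate, assuming the
+        parameterised bilinear relaxation interpolates between the two McCormick-style planes
+        with weight alpha — for x4 = x2 * x3,
+          lower: alpha_l = alpha * x3.lb + ( 1 - alpha ) * x3.ub,
+                 beta_l  = alpha * x2.lb + ( 1 - alpha ) * x2.ub,
+                 gamma_l = -alpha * x2.lb * x3.lb - ( 1 - alpha ) * x2.ub * x3.ub,
+          upper: alpha_u = alpha * x3.ub + ( 1 - alpha ) * x3.lb,
+                 beta_u  = alpha * x2.lb + ( 1 - alpha ) * x2.ub,
+                 gamma_u = -alpha * x2.lb * x3.ub - ( 1 - alpha ) * x2.ub * x3.lb,
+        here with alpha = 0.5.)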
+ + S = { x2.lb x3.lb, x2.ub x3.lb, x2.lb x3.ub, x2.ub x3.ub } = { 1, -3, -6, 18 } + -6 <= min S <= x4 <= max S = 18 + x2 + 2.5 x3 - 9.5 <= x4 <= x2 + 2.5 x3 + 4.5 + x4.lb = 1 ( x0 - 2x1 ) + 2.5 ( x0 + x1 ) - 9.5 = 3.5 x0 + 0.5 x1 - 9.5 : [-7, -2] + x4.ub = 1 ( x0 - 2x1 ) + 2.5 ( x0 + x1 ) + 4.5 = 3.5 x0 + 0.5 x1 + 4.5 : [7, 12] + x4 range: [-6, 18] + + Layer 3: + + x5 = -x4 : [-18, 6] + => -x2 - 2.5 x3 - 4.5 <= x4 <= -x2 - 2.5 x3 + 9.5 + x5.lb = -1 ( 3.5 x0 + 0.5 x1 + 4.5 ) = -3.5 x0 - 0.5 x1 - 4.5 : [-12, 0] + x5.ub = -1 ( 3.5 x0 + 0.5 x1 - 9.5 ) = -3.5 x0 - 0.5 x1 + 9.5 : [2, 7] + x5 range: [-12, 6] + */ + + List expectedBounds( { Tightening( 2, -1, Tightening::LB ), + Tightening( 2, 6, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 3, Tightening::UB ), + Tightening( 4, -6, Tightening::LB ), + Tightening( 4, 18, Tightening::UB ), + Tightening( 5, -12, Tightening::LB ), + Tightening( 5, 6, Tightening::UB ) } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + bool existsBounds( const List &bounds, Tightening bound ) + { + for ( const auto &b : bounds ) + { + if ( b._type == bound._type && b._variable == bound._variable ) + { + if ( FloatUtils::areEqual( b._value, bound._value ) ) + return true; + } + } + return false; + } + + bool boundsEqual( const List &bounds, const List &expectedBounds ) + { + if ( bounds.size() != expectedBounds.size() ) + return false; + + bool allFound = true; + for ( const auto &bound : bounds ) + { + bool currentFound = false; + for ( const auto &expectedBound : expectedBounds ) + { + currentFound |= + ( bound._type == expectedBound._type && + bound._variable == expectedBound._variable && + FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) ); + } + allFound &= currentFound; + } + return allFound; + } + + void updateTableau( MockTableau &tableau, List &tightenings ) + { + for ( const auto &tightening : tightenings ) + { + if ( tightening._type == Tightening::LB ) + { + tableau.setLowerBound( tightening._variable, tightening._value ); + } + + if ( tightening._type == Tightening::UB ) + { + tableau.setUpperBound( tightening._variable, tightening._value ); + } + } + } + + bool compareVectors( const Vector &vectorA, const Vector &vectorB ) + { + if ( vectorA.size() != vectorB.size() ) + return false; + + for ( unsigned i = 0; i < vectorA.size(); ++i ) + { + if ( !FloatUtils::areEqual( vectorA[i], vectorB[i], 0.0001 ) ) + return false; + } + + return true; } }; diff --git a/src/nlr/tests/Test_LPRelaxation.h b/src/nlr/tests/Test_LPRelaxation.h index 7f293ec65f..242916a81e 100644 --- a/src/nlr/tests/Test_LPRelaxation.h +++ b/src/nlr/tests/Test_LPRelaxation.h @@ -2,7 +2,7 @@ /*! \file Test_LPRelaxation.h ** \verbatim ** Top contributors (to current version): - ** Guy Katz, Andrew Wu + ** Guy Katz, Andrew Wu, Ido Shmuel ** This file is part of the Marabou project. ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS ** in the top-level source directory) and their institutional affiliations. 
@@ -73,9 +73,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addLayer( 4, NLR::Layer::RELU, 2 ); nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.getLayer( 2 )->setAlpha( 0.1 ); - nlr.getLayer( 4 )->setAlpha( 0.1 ); - // Mark layer dependencies for ( unsigned i = 1; i <= 5; ++i ) nlr.addLayerDependency( i - 1, i ); @@ -1763,7 +1760,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setBias( 1, 0, 1 ); nlr.setBias( 3, 1, 2 ); - // Mark the Softmax/Max sources + // Mark the Softmax sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 0, 2, 1 ); nlr.addActivationSource( 1, 0, 2, 2 ); @@ -1774,6 +1771,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 1, 2, 2, 1 ); nlr.addActivationSource( 1, 2, 2, 2 ); + // Mark the Max sources nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 0 ); @@ -1822,7 +1820,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 11, large ); } - void populateNetworkBackwardSoftmaxAndMax2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { @@ -1888,7 +1885,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setBias( 3, 0, 2 ); nlr.setBias( 3, 2, -2 ); - // Mark the Softmax/Max sources + // Mark the Softmax sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 0 ); nlr.addActivationSource( 1, 2, 2, 0 ); @@ -1916,6 +1913,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 3, 1, 4, 2 ); nlr.addActivationSource( 3, 2, 4, 2 ); + // Mark the Max sources nlr.addActivationSource( 4, 0, 5, 0 ); nlr.addActivationSource( 4, 1, 5, 0 ); nlr.addActivationSource( 4, 2, 5, 0 ); @@ -2022,11 +2020,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setBias( 1, 0, 1 ); nlr.setBias( 3, 1, 2 ); - // Mark the ReLU/Bilinear sources + // Mark the ReLU sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); nlr.addActivationSource( 1, 2, 2, 2 ); + // Mark the Bilinear sources nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 0 ); @@ -2139,7 +2138,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setBias( 3, 0, 2 ); - // Mark the ReLU/Bilinear sources + // Mark the ReLU sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); nlr.addActivationSource( 1, 2, 2, 2 ); @@ -2148,6 +2147,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); + // Mark the Bilinear sources nlr.addActivationSource( 4, 0, 5, 0 ); nlr.addActivationSource( 4, 1, 5, 0 ); @@ -2247,7 +2247,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -2260,7 +2259,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -2289,7 +2287,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); 
tableau.setUpperBound( 11, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -2314,7 +2311,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -2347,7 +2343,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 2, -1 ); tableau.setUpperBound( 2, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -2384,7 +2379,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -2401,7 +2395,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -2450,7 +2443,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 21, -large ); tableau.setUpperBound( 21, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -2486,7 +2478,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -2522,7 +2513,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 1, 0 ); tableau.setUpperBound( 1, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -2548,7 +2538,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -2559,7 +2548,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -2588,7 +2576,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -2613,7 +2600,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - 
// Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -2643,7 +2629,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 2, -1 ); tableau.setUpperBound( 2, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -2680,7 +2665,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -2691,7 +2675,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -2740,7 +2723,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 21, -large ); tableau.setUpperBound( 21, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -2776,7 +2758,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -2804,7 +2785,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 1, 0 ); tableau.setUpperBound( 1, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -2830,7 +2810,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -2841,7 +2820,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -2870,7 +2848,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -2895,7 +2872,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -2925,7 +2901,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 2, -1 ); tableau.setUpperBound( 2, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); 
TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
@@ -2962,7 +2937,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
- // Invoke backward LP propagation
TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
@@ -2973,7 +2947,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
- // Change the current bounds
tableau.setLowerBound( 0, -3 );
tableau.setUpperBound( 0, 1 );
@@ -3022,7 +2995,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
tableau.setLowerBound( 21, -large );
tableau.setUpperBound( 21, large );
- // Invoke DeepPoly
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
@@ -3058,7 +3030,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
- // Invoke backward LP propagation
TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
@@ -3086,7 +3057,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
tableau.setLowerBound( 1, 0 );
tableau.setUpperBound( 1, 1 );
- // Invoke DeepPoly
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
@@ -3112,7 +3082,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
- // Invoke backward LP propagation
TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
@@ -3126,7 +3095,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
- // Change the current bounds
tableau.setLowerBound( 0, -3 );
tableau.setUpperBound( 0, 1 );
@@ -3155,7 +3123,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
tableau.setLowerBound( 11, -large );
tableau.setUpperBound( 11, large );
- // Invoke DeepPoly
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
@@ -3180,7 +3147,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
- // Invoke backward LP propagation
TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
@@ -3216,7 +3182,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
tableau.setLowerBound( 2, -1 );
tableau.setUpperBound( 2, 1 );
- // Invoke DeepPoly
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
@@ -3253,7 +3218,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
- // Invoke backward LP propagation
TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -3264,7 +3228,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -3313,7 +3276,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 21, -large ); tableau.setUpperBound( 21, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -3349,7 +3311,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -3377,7 +3338,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 1, 0 ); tableau.setUpperBound( 1, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -3403,7 +3363,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -3414,7 +3373,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -3443,7 +3401,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -3468,7 +3425,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -3498,7 +3454,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 2, -1 ); tableau.setUpperBound( 2, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -3535,7 +3490,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -3546,7 +3500,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -3595,7 +3548,6 
@@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 21, -large ); tableau.setUpperBound( 21, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -3631,7 +3583,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -3659,7 +3610,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 1, 0 ); tableau.setUpperBound( 1, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -3685,7 +3635,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -3701,7 +3650,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -3730,7 +3678,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -3755,7 +3702,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -3791,7 +3737,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 2, -1 ); tableau.setUpperBound( 2, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -3828,7 +3773,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -3851,7 +3795,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -3900,7 +3843,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 21, -large ); tableau.setUpperBound( 21, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -3936,7 +3878,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -3977,8 +3918,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 1, 0 ); tableau.setUpperBound( 1, 1 ); - - // Invoke SBT + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -4003,7 +3943,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -4014,7 +3953,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -4043,7 +3981,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -4068,7 +4005,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -4099,7 +4035,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 2, -1 ); tableau.setUpperBound( 2, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -4130,7 +4065,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -4141,7 +4075,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -4182,7 +4115,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 17, -large ); tableau.setUpperBound( 17, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -4212,7 +4144,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -4240,7 +4171,6 @@ class NetworkLevelReasonerTestSuite : public 
CxxTest::TestSuite tableau.setLowerBound( 1, 0 ); tableau.setUpperBound( 1, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -4266,7 +4196,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); @@ -4276,7 +4205,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -4305,7 +4233,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -4330,7 +4257,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -4362,7 +4288,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 2, -1 ); tableau.setUpperBound( 2, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -4391,7 +4316,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -4405,7 +4329,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -4442,7 +4365,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 15, -large ); tableau.setUpperBound( 15, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -4470,7 +4392,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -4487,25 +4408,1295 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - bool boundsEqual( const List &bounds, const List &expectedBounds ) + void test_preimage_approximation_relu() { - if ( bounds.size() != expectedBounds.size() ) - return false; + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( 
Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); - bool allFound = true; - for ( const auto &bound : bounds ) - { - bool currentFound = false; - for ( const auto &expectedBound : expectedBounds ) - { - currentFound |= - ( bound._type == expectedBound._type && - bound._variable == expectedBound._variable && - FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) ); - } - allFound &= currentFound; - } - return allFound; + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), + Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 8, 0, Tightening::LB ), + + Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1.625, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB 
), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), + + Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 8, 0, Tightening::LB ), + Tightening( 9, 0, Tightening::LB ), + + Tightening( 11, 0.8472, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), + + Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), + Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), + + Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), + + Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), 
Tightening( 20, 17.1667, Tightening::UB ), + + Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 12, -1.75, Tightening::LB ), + Tightening( 13, -4.25, Tightening::LB ), + Tightening( 13, 3.25, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + Tightening( 16, 3.25, Tightening::UB ), + + Tightening( 17, -11.1417, Tightening::LB ), + Tightening( 17, 10, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), + Tightening( 19, 10, Tightening::UB ), + + Tightening( 21, -17.3084, Tightening::LB ), + Tightening( 21, 3.2160, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), 
+ Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), + Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), + Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), + + Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), + Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), + Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), + + Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 11, -5, Tightening::LB ), + Tightening( 12, -4.6429, Tightening::LB ), + Tightening( 13, 8.5519, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + Tightening( 16, 8.5519, Tightening::UB ), + + Tightening( 17, -23.6231, Tightening::LB ), + Tightening( 17, 14.0909, Tightening::UB ), + Tightening( 18, 2, Tightening::LB ), + Tightening( 18, 28.2015, Tightening::UB ), + + Tightening( 20, 2, Tightening::LB ), + Tightening( 20, 28.2015, Tightening::UB ), + + Tightening( 21, -29.2015, Tightening::LB ), + Tightening( 21, 6.5734, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_leaky_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 
7, 1, Tightening::UB ), + + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 4, -0.1, Tightening::LB ), + + Tightening( 7, -2, Tightening::LB ), + + Tightening( 8, -0.045, Tightening::LB ), + Tightening( 9, -0.2, Tightening::LB ), + + Tightening( 10, -1.8, Tightening::LB ), + Tightening( 10, 0, Tightening::UB ), + + Tightening( 11, 1.4542, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + 
List expectedBounds4( { + Tightening( 4, -0.5, Tightening::LB ), + Tightening( 5, -0.4, Tightening::LB ), + + Tightening( 6, -2, Tightening::LB ), + Tightening( 6, 3.1, Tightening::UB ), + Tightening( 7, -3.2, Tightening::LB ), + Tightening( 7, 4, Tightening::UB ), + + Tightening( 8, -0.2, Tightening::LB ), + Tightening( 8, 3.1, Tightening::UB ), + Tightening( 9, -0.32, Tightening::LB ), + Tightening( 9, 4, Tightening::UB ), + + Tightening( 10, -3.8726, Tightening::LB ), + Tightening( 10, 0.03, Tightening::UB ), + Tightening( 11, 0.4074, Tightening::LB ), + Tightening( 11, 11.3243, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_leaky_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyRelu2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), + Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), + Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), + Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), + Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), + + Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), + Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), + + Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), + Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), + + Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 7, -0.2, Tightening::LB ), 
Tightening( 8, -0.3, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), Tightening( 10, -0.6, Tightening::LB ), + + Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 8.5, Tightening::UB ), + Tightening( 12, -2.225, Tightening::LB ), Tightening( 13, 2.975, Tightening::UB ), + Tightening( 13, -4.175, Tightening::LB ), + + Tightening( 14, -0.45, Tightening::LB ), Tightening( 14, 8.5, Tightening::UB ), + Tightening( 15, -0.2225, Tightening::LB ), Tightening( 16, 2.975, Tightening::UB ), + Tightening( 16, -0.4175, Tightening::LB ), + + Tightening( 17, -11.452, Tightening::LB ), Tightening( 17, 10.18, Tightening::UB ), + Tightening( 18, 0.87, Tightening::LB ), Tightening( 18, 16.0688, Tightening::UB ), + + Tightening( 19, -1.1452, Tightening::LB ), Tightening( 19, 10.18, Tightening::UB ), + Tightening( 20, 0.87, Tightening::LB ), Tightening( 20, 16.0688, Tightening::UB ), + + Tightening( 21, -17.0684, Tightening::LB ), Tightening( 21, 3.6767, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, 
Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), + Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), + Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), + Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), + Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), + + Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), + Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), + + Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), + Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), + + Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, -0.2, Tightening::LB ), Tightening( 8, -0.5, Tightening::LB ), + Tightening( 9, -0.6, Tightening::LB ), Tightening( 10, -1.5, Tightening::LB ), + + Tightening( 11, -5.6, Tightening::LB ), Tightening( 11, 16.4636, Tightening::UB ), + Tightening( 12, -6.0286, Tightening::LB ), Tightening( 13, -5.9, Tightening::LB ), + Tightening( 13, 8.0468, Tightening::UB ), + + Tightening( 14, -0.56, Tightening::LB ), Tightening( 14, 16.4636, Tightening::UB ), + Tightening( 15, -0.6029, Tightening::LB ), Tightening( 16, -0.59, Tightening::LB ), + Tightening( 16, 8.0468, Tightening::UB ), + + Tightening( 17, -24.8864, Tightening::LB ), Tightening( 17, 14.3076, Tightening::UB ), + Tightening( 18, 0.75, Tightening::LB ), Tightening( 18, 28.0272, Tightening::UB ), + + Tightening( 19, -2.4886, Tightening::LB ), Tightening( 19, 14.3076, Tightening::UB ), + Tightening( 20, 0.75, Tightening::LB ), Tightening( 20, 28.0272, Tightening::UB ), + + Tightening( 21, -29.9648, Tightening::LB ), Tightening( 21, 6.9619, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_sign() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + 
Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) 
); + TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_sign2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large 
); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_relu_and_bilinear() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + 
"backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ), + + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ), + + Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 10, 8.361, Tightening::UB ), + Tightening( 11, -8.361, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), 
Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), + + Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), + + Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + Tightening( 8, 7, Tightening::UB ), + + Tightening( 9, 5.8235, Tightening::UB ), + + Tightening( 10, -14, Tightening::LB ), + Tightening( 10, 40.7647, Tightening::UB ), + + Tightening( 11, -40.7647, Tightening::LB ), + Tightening( 11, 14, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_relu_and_bilinear2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ), + Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), + + Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 12, -1.75, Tightening::LB ), + + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + + Tightening( 15, -0.0001, Tightening::LB ), + Tightening( 15, 78.8787, Tightening::UB ), + } ); + 
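+        // Keep only the tightenings that strictly improve on the DeepPoly bounds collected above.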
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), + Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), + + Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 11, -5, Tightening::LB ), + Tightening( 12, -4.6429, Tightening::LB ), + + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + + Tightening( 15, 0, Tightening::LB ), + Tightening( 15, 211.0082, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + bool 
boundsEqual( const List &bounds, const List &expectedBounds ) + { + if ( bounds.size() < expectedBounds.size() ) + return false; + + bool allFound = true; + for ( const auto &bound : bounds ) + { + bool currentFound = false; + for ( const auto &expectedBound : expectedBounds ) + { + currentFound |= + ( bound._type == expectedBound._type && + bound._variable == expectedBound._variable && + FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) ); + } + allFound &= currentFound; + } + return allFound; + } + + // Create list of all tightenings in bounds for which there is no bound in newBounds + // or in previousBounds which is at least as tight. + List removeRedundancies( const List &newBounds, + const List &bounds ) + { + List minimalBounds; + unsigned i = 0; + for ( const auto &bound : newBounds ) + { + bool foundTighter = false; + unsigned j = 0; + for ( const auto &otherBound : newBounds ) + { + if ( i < j ) + { + foundTighter |= + ( bound._type == otherBound._type && + bound._variable == otherBound._variable && + ( ( bound._type == Tightening::LB && + FloatUtils::lte( bound._value, otherBound._value, 0.0001 ) ) || + ( bound._type == Tightening::UB && + FloatUtils::gte( bound._value, otherBound._value, 0.0001 ) ) ) ); + } + ++j; + } + for ( const auto &otherBound : bounds ) + { + foundTighter |= + ( bound._type == otherBound._type && bound._variable == otherBound._variable && + ( ( bound._type == Tightening::LB && + FloatUtils::lte( bound._value, otherBound._value, 0.0001 ) ) || + ( bound._type == Tightening::UB && + FloatUtils::gte( bound._value, otherBound._value, 0.0001 ) ) ) ); + } + if ( !foundTighter ) + { + minimalBounds.append( bound ); + } + ++i; + } + return minimalBounds; } void updateTableau( MockTableau &tableau, List &tightenings ) diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index b74cd3931c..2e41d100f0 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -2,7 +2,7 @@ /*! \file Test_NetworkLevelReasoner.h ** \verbatim ** Top contributors (to current version): - ** Guy Katz, Andrew Wu + ** Guy Katz, Andrew Wu, Ido Shmuel ** This file is part of the Marabou project. ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS ** in the top-level source directory) and their institutional affiliations. 
@@ -14,7 +14,6 @@ **/ #include "../../engine/tests/MockTableau.h" // TODO: fix this -#include "DeepPolySoftmaxElement.h" #include "FloatUtils.h" #include "Layer.h" #include "NetworkLevelReasoner.h" @@ -480,7 +479,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); } - void populateNetworkWithMax( NLR::NetworkLevelReasoner &nlr ) { /* @@ -741,11 +739,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setBias( 1, 0, 1 ); nlr.setBias( 3, 1, 2 ); - // Mark the Round/Sign sources + // Mark the Abs sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); nlr.addActivationSource( 1, 2, 2, 2 ); + // Mark the ReLU sources nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); @@ -813,11 +812,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setBias( 1, 0, 1 ); nlr.setBias( 3, 1, 2 ); - // Mark the Round/Sign sources + // Mark the Round sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); nlr.addActivationSource( 1, 2, 2, 2 ); + // Mark the Sign sources nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); @@ -887,11 +887,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setBias( 1, 0, 1 ); nlr.setBias( 3, 1, 2 ); - // Mark the LeakyReLU/Sigmoid sources + // Mark the LeakyReLU sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); nlr.addActivationSource( 1, 2, 2, 2 ); + // Mark the Sigmoid sources nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); @@ -957,7 +958,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setBias( 1, 0, 1 ); nlr.setBias( 3, 1, 2 ); - // Mark the Softmax/Max sources + // Mark the Softmax sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 0, 2, 1 ); nlr.addActivationSource( 1, 0, 2, 2 ); @@ -968,6 +969,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 1, 2, 2, 1 ); nlr.addActivationSource( 1, 2, 2, 2 ); + // Mark the Max sources nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 0 ); @@ -1031,11 +1033,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setBias( 1, 0, 1 ); nlr.setBias( 3, 1, 2 ); - // Mark the ReLU/Bilinear sources + // Mark the ReLU sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); nlr.addActivationSource( 1, 2, 2, 2 ); + // Mark the Bilinear sources nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 0 ); @@ -1159,7 +1162,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setWeight( 0, 0, 3, 0, -1 ); nlr.setWeight( 1, 0, 5, 0, 3 ); - nlr.setBias( 3, 0, 1 ); nlr.setBias( 5, 0, 1 ); @@ -1627,7 +1629,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite { /* - x0 x3 S x6 x1 x4 S x7 @@ -1641,7 +1642,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x6 x7 x8 = softmax(x3, x4, x5) x9 = x6 + x7 + x8 - x10 = x6 + x7 + x8 + x10 = - x6 - x7 - x8 */ @@ -1676,6 +1677,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setBias( 1, 1, 2 ); nlr.setBias( 1, 2, 3 ); + // Mark the Softmax sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 0 ); nlr.addActivationSource( 1, 2, 2, 0 ); @@ -1686,7 +1688,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 
nlr.addActivationSource( 1, 1, 2, 2 ); nlr.addActivationSource( 1, 2, 2, 2 ); - // Variable indexing nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); @@ -1729,7 +1730,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite { /* - x0 x3 S x8 x1 x4 S x9 @@ -1800,6 +1800,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setBias( 1, 3, 2 ); nlr.setBias( 1, 4, 1 ); + // Mark the Softmax sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 2, 2, 0 ); nlr.addActivationSource( 1, 4, 2, 0 ); @@ -1874,7 +1875,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite { /* - x0 x2 x x4 -- x5 x1 x3 @@ -1903,10 +1903,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setWeight( 0, 1, 1, 1, 1 ); nlr.setWeight( 2, 0, 3, 0, -1 ); + // Mark the Bilinear sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 0 ); - // Variable indexing nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); @@ -2188,7 +2188,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output[0], -5 ) ); } - void test_evaluate_softmax() { NLR::NetworkLevelReasoner nlr; @@ -2282,23 +2281,24 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setWeight( 0, 1, 1, 1, -3 ); nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); nlr.setWeight( 2, 1, 3, 0, 2 ); nlr.setWeight( 2, 2, 3, 1, -2 ); nlr.setWeight( 0, 1, 3, 1, 1 ); - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 0, 0, 4, 2 ); - nlr.setWeight( 4, 0, 5, 0, 1 ); nlr.setWeight( 4, 1, 5, 0, 1 ); nlr.setWeight( 4, 2, 5, 0, 1 ); + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + // Mark the ReLU sources + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 0, 0, 4, 2 ); // Evaluate double input[2]; double output; @@ -2839,11 +2839,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setBias( 5, 0, 0 ); nlr.setBias( 5, 1, 0 ); - // Mark the ReLU/Abs sources + // Mark the ReLU sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); nlr.addActivationSource( 1, 2, 2, 2 ); + // Mark the Abs sources nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); @@ -3543,23 +3544,25 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setWeight( 0, 1, 1, 1, -3 ); nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); nlr.setWeight( 2, 1, 3, 0, 2 ); nlr.setWeight( 2, 2, 3, 1, -2 ); nlr.setWeight( 0, 1, 3, 1, 1 ); - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 0, 0, 4, 2 ); - nlr.setWeight( 4, 0, 5, 0, 1 ); nlr.setWeight( 4, 1, 5, 0, 1 ); nlr.setWeight( 4, 2, 5, 0, 1 ); + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + // Mark the ReLU sources + nlr.addActivationSource( 3, 0, 4, 0 ); 
+ nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 0, 0, 4, 2 ); + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); // Simulate1 @@ -4001,7 +4004,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setBias( 1, 0, 1 ); nlr.setBias( 3, 1, 2 ); - // Mark the ReLU sources + // Mark the Abs sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); nlr.addActivationSource( 1, 2, 2, 2 ); @@ -4434,7 +4437,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite NLR::NetworkLevelReasoner nlr; populateNetworkWithRound( nlr ); - MockTableau tableau; tableau.getBoundManager().initialize( 14 ); @@ -4566,7 +4568,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite NLR::NetworkLevelReasoner nlr; populateNetworkWithSigmoids( nlr ); - MockTableau tableau; tableau.getBoundManager().initialize( 14 ); @@ -4698,7 +4699,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite NLR::NetworkLevelReasoner nlr; populateNetworkWithMax( nlr ); - MockTableau tableau; tableau.getBoundManager().initialize( 12 ); @@ -4819,7 +4819,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite NLR::NetworkLevelReasoner nlr; populateNetworkWithSoftmax( nlr ); - MockTableau tableau; tableau.getBoundManager().initialize( 14 ); @@ -4951,7 +4950,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite NLR::NetworkLevelReasoner nlr; populateNetworkWithBilinear( nlr ); - MockTableau tableau; tableau.getBoundManager().initialize( 12 ); @@ -5469,7 +5467,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite NLR::NetworkLevelReasoner nlr; populateNetworkWithSoftmaxAndMax( nlr ); - MockTableau tableau; tableau.getBoundManager().initialize( 12 ); @@ -6041,7 +6038,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Layer 2 (with residual from x0): - x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1] + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 2] x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] x3 range: [-1, 2.5] @@ -6057,7 +6054,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x5.lb = 3 ( 0 ) + 3 ( x0 ) + 1 = 3x0 + 1 : [-2, 4] x5.ub = 3 ( -15/14 x0 + 20/14 ) + 3 ( x0 ) + 1 = -3/14 x0 + 74/14 : [71/14, 77/14 = 5.5] - x5 range: [-2, 4] + x5 range: [-2, 5.5] */ List expectedBounds( { @@ -7461,20 +7458,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ List expectedBounds( - { Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ), - Tightening( 8, 0.8668, Tightening::LB ), Tightening( 8, 0.8668, Tightening::UB ), - Tightening( 9, 0.9820, Tightening::LB ), Tightening( 9, 0.9820, Tightening::UB ), - Tightening( 10, 0.1173, Tightening::LB ), Tightening( 10, 0.1173, Tightening::UB ), - Tightening( 11, 0.0179, Tightening::LB ), Tightening( 11, 0.0179, Tightening::UB ), - Tightening( 12, 0.0159, Tightening::LB ), Tightening( 12, 0.0159, Tightening::UB ), - Tightening( 13, 0.9470, Tightening::LB ), Tightening( 13, 0.9470, Tightening::UB ), - Tightening( 14, -0.9470, Tightening::LB ), Tightening( 14, -0.9470, Tightening::UB ), - Tightening( 15, 1.0253, Tightening::LB ), 
Tightening( 15, 1.0253, Tightening::UB ), - Tightening( 16, -1.0253, Tightening::LB ), Tightening( 16, -1.0253, Tightening::UB ) + { Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ), + Tightening( 8, 0.8668, Tightening::LB ), Tightening( 8, 0.8668, Tightening::UB ), + Tightening( 9, 0.9820, Tightening::LB ), Tightening( 9, 0.9820, Tightening::UB ), + Tightening( 10, 0.1173, Tightening::LB ), Tightening( 10, 0.1173, Tightening::UB ), + Tightening( 11, 0.0179, Tightening::LB ), Tightening( 11, 0.0179, Tightening::UB ), + Tightening( 12, 0.0159, Tightening::LB ), Tightening( 12, 0.0159, Tightening::UB ), + Tightening( 13, 1, Tightening::LB ), Tightening( 13, 1, Tightening::UB ), + Tightening( 14, -1, Tightening::LB ), Tightening( 14, -1, Tightening::UB ), + Tightening( 15, 1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, -1, Tightening::UB ) } ); @@ -7483,53 +7480,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - void test_softmax_bounds_er() - { - Vector inputLb = { -1, 0, 1 }; - Vector inputUb = { 0, 2, 4 }; - Vector input = { -0.5, 1, 2.5 }; - - double value = NLR::DeepPolySoftmaxElement::ERLowerBound( input, inputLb, inputUb, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.0114799, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.00563867, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 1 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.000838421, 0.00001 ) ); - - - Vector outputLb = { 0.2, 0, 0 }; - Vector outputUb = { 0.4, 0.1, 0.1 }; - - value = NLR::DeepPolySoftmaxElement::ERUpperBound( input, outputLb, outputUb, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, -1.44538, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 1.96538, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 1 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.358535, 0.00001 ) ); - } - - void test_softmax_bounds_lse1() - { - Vector inputLb = { -1, 0, 1 }; - Vector inputUb = { 0, 2, 3 }; - Vector input = { -0.5, 1, 2 }; - double value = NLR::DeepPolySoftmaxElement::LSELowerBound( input, inputLb, inputUb, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 1 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.00703444, 0.001 ) ); - - Vector outputLb = { 0.2, 0, 0 }; - Vector outputUb = { 0.4, 0.1, 0.1 }; - value = NLR::DeepPolySoftmaxElement::LSEUpperBound( input, outputLb, outputUb, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.164165, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.272204, 0.00001 ) ); - value = 
NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 1 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.073207, 0.00001 ) ); - } - void test_sbt_bilinear() { NLR::NetworkLevelReasoner nlr; @@ -7564,12 +7514,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Lower bound: alpha_l = x3.lb = -1 beta = x2.lb = -1 - gamma_l = -x2.lb * x3.lb = --1 * -1 = -1 + gamma_l = -x2.lb x3.lb = --1 * -1 = -1 Upper bound: alpha_u = x3.ub = 3 beta = x2.lb = -1 - gamma_u = -x2.lb * x3.ub = --1 * 3 = 3 + gamma_u = -x2.lb x3.ub = --1 * 3 = 3 x4.lb = -1 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + -1 = -2x0 + x1 - 1 : [-7, -2] x4.ub = 3 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + 3 = 2x0 - 7x1 + 3 : [0, 21] @@ -7577,9 +7527,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Layer 3: - x7.lb = -1 ( 2x0 - 5x1 + 3 ) = -2x0 + 7x1 - 3 : [-21, 0] - x7.ub = -1 ( -2x0 + 3x1 - 1 ) = 2x0 + x1 + 1 : [2, 7] - x4 range: [-21, 5] + x5.lb = -1 ( 2x0 - 5x1 + 3 ) = -2x0 + 7x1 - 3 : [-21, 0] + x5.ub = -1 ( -2x0 + x1 - 1 ) = 2x0 - x1 + 1 : [2, 7] + x5 range: [-21, 7] */ List expectedBounds( { Tightening( 2, -1, Tightening::LB ), @@ -7596,63 +7546,1970 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - void test_concretize_input_assignment() + void test_parameterised_sbt_relus_all_active() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); - populateNetwork( nlr ); + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); - // With ReLUs, Inputs are zeros, only biases count - tableau.nextValues[0] = 0; - tableau.nextValues[1] = 0; + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); - Map assignment; + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + /* + Input ranges: - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 4 ) ); + x0: [4, 6] + x1: [1, 5] - TS_ASSERT( assignment.size() == 14 ); - TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( assignment[13], 4 ) ); + Layer 1: - // With ReLUs, case 1 - tableau.nextValues[0] = 1; - tableau.nextValues[1] = 1; + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 1 ) ); + Both ReLUs active, bound survive through activations: - TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( assignment[13], 1 ) ); + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] - // With ReLUs, case 2 - tableau.nextValues[0] = 1; - tableau.nextValues[1] = 2; + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + Layer 2: - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 0 ) ); - 
TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 0 ) ); + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ - TS_ASSERT( FloatUtils::areEqual( assignment[12], 0 ) ); - TS_ASSERT( FloatUtils::areEqual( assignment[13], 0 ) ); + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } + void test_parameterised_sbt_relus_active_and_inactive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); - void test_obtain_bound_from_ipq() + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive, bounds get zeroed + Second ReLU is active, bounds surive the activation + + x4.lb = 0 + x4.ub = 0 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_relus_active_and_not_fixed() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; - populateNetwork( nlr ); + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); - Query query; - query.setNumberOfVariables( 14 ); + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 
3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is undecided, bound is concretized. LbOfUb = 12, using standard ReLU lower + coefficient. Coefficient: 12/(12--4) = 12/16 = 0.75 + Second ReLU is active, bounds surive the activation + + x4 range: [0, 12] + x4.lb = 0.75( 2x0 + 3x1 ) - 0.75 * 15 = 1.5x0 + 2.25x1 - 11.25 + x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = 0.5x0 + 1.25x1 - 11.25 + x6.ub = 0.5x0 + 1.25x1 - 8.25 + + x6 range: [2 + 1.25 - 11.25 = -8, 3 + 6.25 - 8.25 = 1] = [-8, 1] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_relus_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. + nlr.setBias( 1, 0, -15 ); + + // However, one of the ReLU's variables has been eliminated + nlr.eliminateVariable( 2, -3 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive (set externally), bounds get zeroed + Second ReLU is active, bounds surive the activation + + x4.lb = 0 + x4.ub = 0 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_relu_residual1() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluResidual1( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + + unsigned paramCount = 
nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [-1, 1] + + Layer 1: + + x1.lb = x0 : [-1, 1] + x1.ub = x0 : [-1, 1] + + ReLU is undecided, bound is concretized. LbOfUb = 1, using standard ReLU lower + coefficient. Coefficient: 1/( 1--1 ) = 1/2 = 0.5 + + x2.lb = 0.5x0 + x2.ub = 0.5x0 + 0.5 + x2 range: [0, 1] + + Layer 2 (with residual from x0): + + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 2] + x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] + x3 range: [-1, 2.5] + + ReLU is undecided, bound is concretized. LbOfUb = 2, using standard ReLU lower + coefficient. Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. + + x4.lb = 0 + x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] + x4 range: [0, 2.5] + + Layer 3 (with residual from x1): + + x5.lb = 3 ( 0 ) + 3 ( x0 ) + 1 = 3x0 + 1 : [-2, 4] + x5.ub = 3 ( -15/14 x0 + 20/14 ) + 3 ( x0 ) + 1 = -3/14 x0 + 74/14 : [71/14, 77/14 = 5.5] + + x5 range: [-2, 5.5] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2.5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 2.5, Tightening::UB ), + Tightening( 5, 2, Tightening::LB ), + Tightening( 5, 5.5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_relu_residual2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluResidual2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [-1, 1] + + Layer 1: + + x1.lb = x0 : [-1, 1] + x1.ub = x0 : [-1, 1] + + ReLU is undecided, bound is concretized. LbOfUb = 1, using standard ReLU lower + coefficient. Coefficient: 1/( 1--1 ) = 1/2 = 0.5 + + x2.lb = 0.5x0 + x2.ub = 0.5x0 + 0.5 + x2 range: [0, 1] + + Layer 2 (with residual from x0): + + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 2] + x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] + x3 range: [-1, 2.5] + + ReLU is undecided, bound is concretized. LbOfUb = 2, using standard ReLU lower + coefficient. Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. 
+ + x4.lb = 0 + x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] + x4 range: [0, 2.5] + + Layer 3 (with residual from x0): + + x5.lb = 3 ( 0 ) + 1 ( x0 ) + 1 = 1x0 + 1 : [0, 2] + x5.ub = 3 ( -15/14 x0 + 20/14 ) + 1 ( x0 ) + 1 = -31/14 x0 + 74/14 : [43/14, 105/14 + = 7.5] x5 range: [0, 7.5] + + Layer 4: + x6.lb = 1x0 + 1 : [0, 2] + x6.ub = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5] + x6 range: [0, 7.5] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2.5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 2.5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 7.5, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), + Tightening( 6, 7.5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_relu_reindex() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluReindex( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 1] + + Layer 1: + + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] + + Both ReLUs are undecided, bounds are concretized. LbOfUb = 2, using standard ReLU lower + coefficient. Coefficient: 2/( 2--2 ) = 2/4 = 0.5 + + x4.lb = 0.5 ( x0 + x1 ) = 0.5x0 + 0.5x1 + x4.ub = 0.5 ( x0 + x1 ) + 1 = 0.5x0 + 0.5x1 + 1 + x4 range: [0, 2] + + x5.lb = 0.5 ( x0 - x1 ) = 0.5x0 - 0.5x1 + x5.ub = 0.5 ( x0 - x1 ) + 1 = 0.5x0 - 0.5x1 + 1 + x5 range: [0, 2] + + Layer 2: + + x6.lb = 1 ( 0.5x0 + 0.5x1 ) + 1 ( 0.5x0 - 0.5x1 ) = x0 : [-1, 1] + x6.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) + 1 ( 0.5x0 - 0.5x1 + 1 ) = x0 + 2 : [1, 3] + x6 range: [-1, 3] + + x7.lb = 1 ( 0.5x0 + 0.5x1 ) - 1 ( 0.5x0 - 0.5x1 + 1 ) = x1 - 1 : [-2, 0] + x7.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) - 1 ( 0.5x0 - 0.5x1 ) = x1 + 1 : [0, 2] + x7 range: [-2, 2] + + Both ReLUs are undecided, bounds are concretized. + First ReLU: LbOfUb = 12, using standard ReLU coefficient. + Coefficient (first ReLU, lower): 1/( 1--1 ) = 1/2 = 0.5 + Coefficient (first ReLU, upper): 1 (propagated as is) + Second ReLU: LbOfUb = 0, using custom lower coefficient 0.5. 
+ Coefficient (second ReLU, lower): 0.5 + Coefficient (second ReLU, upper): 2/( 2--2 ) = 2/4 = 0.5 + + x8.lb = 0.5 ( x0 ) = 0.5x0 + x8.ub = x0 + 2 + x8 range: [0, 3] + + x9.lb = 0.5 ( x1 - 1 ) = 0.5x1 - 0.5 + x9.ub = 0.5 ( x1 + 1 ) + 1 = 0.5x1 + 1.5 + x9 range: [0, 2] + + Layer 3: + + x10.lb = 1 ( 0.5x0 ) + 1 ( 0.5x1 - 0.5 ) + 1 = 0.5x0 + 0.5x1 + 0.5 : [-0.5, 1.5] + x10.ub = 1 ( x0 + 2 ) + 1 ( 0.5x1 + 1.5 ) + 1 = x0 + 0.5x1 + 4.5 : [3, 6] + x10 range: [-0.5, 6] + + x11.lb = 0.5x1 - 0.5 + x11.ub = 0.5x1 + 1.5 + x11 range: [0, 2] + + */ + + List expectedBounds( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -0.5, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 2, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_abs_all_positive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + 
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Both absolute values positive, bound survive through activations: + + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ + + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_abs_positive_and_negative() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 30 : [-19, 
-3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is negative, bounds get flipped + Second absolute value is positive, bounds surive the activation + + x4.lb = -2x0 -3x1 + 30 : [3, 19] + x4.ub = -2x0 -3x1 + 30 : [3, 19] + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - 3x0 - 4x1 + 30 : [-8, 14] + x6.ub = - 3x0 - 4x1 + 30 : [-8, 14] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 19, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 14, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_absolute_values_positive_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is 
undecided, bounds are concretized. + Second absolute value is active, bounds surive the activation + + x4 range: [0, 12] + x4.lb = 0 + x4.ub = 12 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 + 12 : [ 1, 7] + + x6 range: [-11, 7] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, 7, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_absolute_values_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
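+        // (With x0 in [4, 6] and x1 in [1, 5], the weighted sum 2x0 + 3x1 - 15 ranges over
+        // [-4, 12], so its sign is not determined by the bounds alone.)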
+ nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2 is eliminated, everything set to -3 + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Second absolute value is positive, bounds surive the activation + + x4: all set to 3 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 + 3 : [-8, -2] + x6.ub = - x0 - x1 + 3 : [-8, -2] + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_signs_positive_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + 
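+        // Convention assumed in these tests: getNumberOfParameters() reports one
+        // optimizable coefficient per parameterised relaxation, and coeffs (all 0.5
+        // here) selects the midpoint relaxation for every undecided activation,
+        // matching the "alpha = { 0.5, 0.5 }" derivation in the comment below.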
TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First sign is undecided, bounds are concretized. + LbOfUb = -4 < 0, UbOfLb = 12 > 0, using custom coefficients with alpha = { 0.5, 0.5 }. + Second sign is active, bounds become constant 1 + Coefficient (first Sign, lower): 2/12 * 0.5 = 1/12. + Coefficient (first Sign, upper): -2/-4 * 0.5 = 1/4. + + x4 range: [-1, 1] + x4.lb = 1/12 ( 2x0 + 3x1 - 15 ) - 1 = 2/12 x0 + 3/12 x1 - 27/12 + x4.ub = 1/4 ( 2x0 + 3x1 - 15 ) + 1 = 0.5 x0 + 0.75x1 - 2.75 + + x5 range: [1, 1] + x5.lb = 1 + x5.ub = 1 + + Layer 2: + + x6.lb = 1 ( 2/12 x0 + 3/12 x1 - 27/12 ) - 1 ( 1 ) = 2/12 x0 + 3/12 x1 - 39/12 : + [-28/12 = -7/3, -1] + x6.ub = 1 ( 0.5 x0 + 0.75x1 - 2.75 ) - 1 ( 1 ) = 0.5 x0 + 0.75x1 - 3.75 : [-1, 3] + + x6 range: [-7/3, 3] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2.3333, Tightening::LB ), + Tightening( 6, 3, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_signs_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for 
x2, which is node (1,0). Should make the node unfixed. + nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2 is eliminated, everything set to -3 + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First sign is negative, bounds become constant -1 + Second sign is positive, bounds become constant 1 + + x4: all set to -1 + + x5: all set to 1 + + Layer 2: + + x6.lb = 1 ( -1 ) - 1 ( 1 ) = -2 + x6.ub = 1 ( -1 ) - 1 ( 1 ) = -2 + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, -1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTLeakyReLU( nlr, tableau ); // alpha = 0.2 + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 1] + + Layer 1: + + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] + + Both LeakyReLUs are undecided, bounds are concretized. Using custom lower coefficient with + alpha = { 0.5 }. + Lower Coefficient: ( 1 - 0.2 ) * 0.5 + 0.2 = 0.6 + Lower Bias: 0 + Upper Coefficient: ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6 + Upper Bias: ( 0.2 - 1 ) * 2 * -2 /( 2--2 ) = 0.8 + + x4.lb = 0.6 ( x0 + x1 ) = 0.6 x0 + 0.6x1 + x4.ub = 0.6 ( x0 + x1 ) + 0.8 = 0.6 x0 + 0.6 x1 + 0.8 + x4 range: [-0.4, 2] + + x5.lb = 0.6 ( x0 - x1 ) = 0.6 x0 - 0.6 x1 + x5.ub = 0.6 ( x0 - x1 ) + 0.8 = 0.6 x0 - 0.6 x1 + 0.8 + x5 range: [-0.4, 2] + + Layer 2: + + x6.lb = 1 ( 0.6x0 + 0.6x1 ) + 1 ( 0.6x0 - 0.6x1 ) = 1.2 x0 : [-1.2, 1.2] + x6.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) + 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2 x0 + 1.6 : + [0.4, 2.8] x6 range: [-1.2, 2.8] + + x7.lb = 1 ( 0.6x0 + 0.6x1 ) - 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2 x1 - 0.8 : [-2, 0.4] + x7.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) - 1 ( 0.6x0 - 0.6x1 ) = 1.2 x1 + 0.8 : [-0.4, 2] + x7 range: [-2, 2] + + Both LeakyReLUs are undecided, bounds are concretized. Using custom lower coefficient with + alpha = { 0.5 }. 
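+
+            (General pattern assumed throughout this test: for an undecided LeakyReLU
+            with slope a on [l, u] and learned parameter c in [0, 1],
+                lower: y >= ( ( 1 - a ) * c + a ) * x
+                upper: y <= ( u - a*l ) / ( u - l ) * x + ( a - 1 ) * u * l / ( u - l )
+            Here a = 0.2 and c = 0.5, which yields the coefficients below.)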
+ Lower Coefficient (first LeakyReLU): ( 1 - 0.2 ) * 0.5 + 0.2 = 0.6 + Lower Bias (first LeakyReLU): 0 + Upper Coefficient (first LeakyReLU): ( 2.8 - 0.2*-1.2 )/( 2.8--1.2 ) = 3.04/4 = 0.76 + Upper Bias (first LeakyReLU): ( 0.2 - 1 ) * 2.8 * -1.2 / ( 2.8--1.2 ) = 0.672 + + Lower Coefficient (second LeakyReLU): ( 1 - 0.2 ) * 0.5 + 0.2 = 0.6 + Lower Bias (second LeakyReLU): 0 + Upper Coefficient (second LeakyReLU): ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6 + Upper Bias (second LeakyReLU): ( 0.2 - 1 ) * 2 * -2 / ( 2--2 ) = 0.8 + + x8.lb = 0.6 ( 1.2x0 ) = 0.72 x0 + x8.ub = 0.76 ( 1.2x0 + 1.6 ) + 0.76 = 0.912 x0 + 1.888 + x8 range: [-0.24, 2.8] + + x9.lb = 0.6 ( 1.2x1 - 0.8 ) = 0.72 x0 - 0.48 + x9.ub = 0.6 ( 1.2x1 + 0.8 ) + 0.8 = 0.72 x1 + 1.28 + x9 range: [-0.4, 2] + + Layer 3: + + x10.lb = 1 ( 0.72 x0 ) + 1 ( 0.72 x0 - 0.48 ) + 1 = 1.44 x0 + 0.52 : [-0.92, 2] + x10.ub = 1 ( 0.912 x0 + 1.888 ) + 1 ( 0.72 x1 + 1.28 ) + 1 = 0.912 x0 + 0.72 x1 + 4.168 + : [2.536, 5.8] + x10 range: [-0.92, 5.8] + + x11.lb = 0.72 x0 - 0.48 : [-1.2, 0.24] + x11.ub = 0.72 x1 + 1.28 : [-0.56, 2] + x11 range: [-1.2, 2] + + */ + + List expectedBounds( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.4, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -0.4, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1.2, Tightening::LB ), Tightening( 6, 2.8, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -0.24, Tightening::LB ), Tightening( 8, 2.8, Tightening::UB ), + Tightening( 9, -0.4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -0.92, Tightening::LB ), Tightening( 10, 5.8, Tightening::UB ), + Tightening( 11, -1.2, Tightening::LB ), Tightening( 11, 2, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_sigmoids_and_round() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSigmoidsAndRound( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + // Layer 1 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 0 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 0 ), 2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 1 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 1 ), 2, 0.00001 ) ); + + // Layer 2 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 0 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 0 ), 0.8807, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 1 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 1 ), 0.8807, 0.0001 ) ); + + // Layer 
3 + /* + Double-check with Python + --- + from math import exp as e + def g(x): + return 1 / (1 + e(-x)) + + def g_prime(x): + return g(x) * (1 - g(x)) + + def lam(l, u): + return (g(u) - g(l)) / (u - l) + + def lam_prime(l, u): + return min(g_prime(l), g_prime(u)) + + l3 = l4 = -2 + u3 = u4 = 2 + l5 = l6 = g(-2) + u5 = u6 = g(2) + lambda7 = lam(l3, u3) + lambda7_prime = lam_prime(l3, u3) + lambda8 = lam(l4, u4) + lambda8_prime = lam_prime(l4, u4) + x7_l = lambda7_prime * (-2) + g(-2) + g(-2) - lambda7_prime * (-2 + -2) + x7_u = lambda7_prime * (2) + g(2) + g(2) -lambda7_prime * (2 + 2) + x8_l = lambda8_prime * (-2) + g(-2) - g(2) - lambda8_prime * (-2 - 2) + x8_u = lambda8_prime * (2) + g(2) - g(-2) -lambda8_prime * (2 - -2) + print(x7_l) + print(x7_u) + print(x8_l) + print(x8_u) + --- + [output]: + 0.4483930148512481 + 1.5516069851487517 + -0.5516069851487517 + 0.5516069851487517 + */ + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 0 ), 0.4483, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 0 ), 1.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 1 ), -0.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 1 ), 0.5516, 0.0001 ) ); + + // Layer 4 + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 0 ), 0 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 0 ), 2 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 1 ), -1 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 1 ), 1 ); + } + + void test_parameterised_sbt_max_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTMax( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 2] + + Layer 1: + + x2.lb = x0 + x1 : [-2, 3] + x2.ub = x0 + x1 : [-2, 3] + + x3.lb = x0 - x1 : [-3, 2] + x3.ub = x0 - x1 : [-3, 2] + + Both ReLUs are undecided, bounds are concretized. LbOfUb > 0, using standard ReLU lower + coefficient. Coefficient (first ReLU): 3/( 3--2 ) = 3/5 = 0.6 Coefficient (second ReLU): + 2/( 2--3 ) = 2/5 = 0.4 + + x4.lb = 0.6 ( x0 + x1 ) = 0.6x0 + 0.6x1 + x4.ub = 0.6 ( x0 + x1 ) + 1.2 = 0.6x0 + 0.6x1 + 1.2 + x4 range: [0, 3] + + x5.lb = 0.4 ( x0 - x1 ) = 0.4x0 + 0.4x1 + x5.ub = 0.4 ( x0 - x1 ) + 1.2 = 0.4x0 + 0.4x1 + 1.2 + x5 range: [0, 2] + + Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub + Max inherits lower bound from x4, and its upper bound is constant 3. 
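+
+            (Rule assumed for an unfixed Max: the lower bound keeps the symbolic
+            expression of one source, x4 here, while the upper bound is concretized
+            to the largest source upper bound, max( 3, 2 ) = 3.)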
+ + x6.lb = 0.6x0 + 0.6x1 : [-1.2, 1.8] + x6.ub = 3 : [3, 3] + x6 range: [-1.2, 3] + + Layer 3: + + x7.lb = 2 ( 0.6x0 + 0.6x1 ) = 1.2x0 + 1.8x1 : [-2.4, 3.6] + x7.ub = 2 ( 3 ) = 6 : [6, 6] + x7 range: [-2.4, 6] + */ + + List expectedBounds( { + Tightening( 2, -2, Tightening::LB ), + Tightening( 2, 3, Tightening::UB ), + Tightening( 3, -3, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), + Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -2.4, Tightening::LB ), + Tightening( 7, 6, Tightening::UB ), + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_max_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTMax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -3 ); + tableau.setUpperBound( 1, -2 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [1, 2] + x1: [-3, -2] + + Layer 1: + + x2.lb = x0 + x1 : [-2, 0] + x2.ub = x0 + x1 : [-2, 0] + + x3.lb = x0 - x1 : [3, 5] + x3.ub = x0 - x1 : [3, 5] + + First ReLU is negative, bounds become constant 0 + Second ReLU is positive, bounds survive the activation + + x4: all set to 0 + + x5.lb = x0 - x1 : [3, 5] + x5.ub = x0 - x1 : [3, 5] + + Max is fixed because x5.lb > x4.ub, it inherits x5's bounds + + x6.lb = x0 - x1 : [3, 5] + x6.ub = x0 - x1 : [3, 5] + + Layer 3: + + x7.lb = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] + x7.ub = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] + */ + + List expectedBounds( { + Tightening( 2, -2, Tightening::LB ), + Tightening( 2, 0, Tightening::UB ), + Tightening( 3, 3, Tightening::LB ), + Tightening( 3, 5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 3, Tightening::LB ), + Tightening( 5, 5, Tightening::UB ), + Tightening( 6, 3, Tightening::LB ), + Tightening( 6, 5, Tightening::UB ), + Tightening( 7, 6, Tightening::LB ), + Tightening( 7, 10, Tightening::UB ), + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_softmax1() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + } + + void test_parameterised_sbt_softmax2() + { + 
Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + { + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + */ + List expectedBounds( { Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, 0.2595, Tightening::LB ), + Tightening( 6, 0.2595, Tightening::UB ), + Tightening( 7, 0.7054, Tightening::LB ), + Tightening( 7, 0.7054, Tightening::UB ), + Tightening( 8, 0.0351, Tightening::LB ), + Tightening( 8, 0.0351, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), + Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), + Tightening( 10, -1, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + { + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "er" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + */ + List expectedBounds( { Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, 0.2595, Tightening::LB ), + Tightening( 6, 0.2595, Tightening::UB ), + Tightening( 7, 0.7054, Tightening::LB ), + Tightening( 7, 0.7054, Tightening::UB ), + Tightening( 8, 0.0351, Tightening::LB ), + Tightening( 8, 0.0351, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), + Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), + Tightening( 10, -1, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + } + + void test_parameterised_sbt_softmax3() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + 
populateNetworkSBTSoftmax2( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.00001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.00001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.00001 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + */ + + List expectedBounds( + { Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ), + Tightening( 8, 0.8668, Tightening::LB ), Tightening( 8, 0.8668, Tightening::UB ), + Tightening( 9, 0.9820, Tightening::LB ), Tightening( 9, 0.9820, Tightening::UB ), + Tightening( 10, 0.1173, Tightening::LB ), Tightening( 10, 0.1173, Tightening::UB ), + Tightening( 11, 0.0179, Tightening::LB ), Tightening( 11, 0.0179, Tightening::UB ), + Tightening( 12, 0.0159, Tightening::LB ), Tightening( 12, 0.0159, Tightening::UB ), + Tightening( 13, 1, Tightening::LB ), Tightening( 13, 1, Tightening::UB ), + Tightening( 14, -1, Tightening::LB ), Tightening( 14, -1, Tightening::UB ), + Tightening( 15, 1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, -1, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_bilinear() + { + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTBilinear( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -2 ); + tableau.setUpperBound( 1, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [1, 2] + x1: [-2, 1] + + Layer 1: + + x2.lb = x0 - 2x1 : [-1, 6] + x2.ub = x0 - 2x1 : [-1, 6] + + x3.lb = x0 + x1 : [-1, 3] + x3.ub = x0 + x1 : [-1, 3] + + Using custom coefficients with alpha = { 0.5, 0.5 }. + Coefficients for bilinear layer: + Lower bound: + alpha_l = 0.5 x3.lb + ( 1 - 0.5 ) x3.ub = 0.5 * -1 + 0.5 * 3 = 1 + beta_l = 0.5 x2.lb + ( 1 - 0.5 ) x2.ub = 0.5 * -1 + 0.5 * 6 = 2.5 + gamma_l = -0.5 x2.lb x3.lb - ( 1 - 0.5 ) x2.ub x3.ub = -0.5 * -1 * -1 - 0.5 * 6 * 3 = + -9.5. + + Upper bound: + alpha_l = 0.5 x3.ub + ( 1 - 0.5 ) x3.lb = 0.5 * -1 + 0.5 * 3 = 1 + beta_l = 0.5 x2.lb + ( 1 - 0.5 ) x2.ub = 0.5 * -1 + 0.5 * 6 = 2.5 + gamma_l = -0.5 x2.lb x3.ub - ( 1 - 0.5 ) x2.ub x3.lb = -0.5 * -1 * 6 - 0.5 * -1 * 3 + = 4.5. 
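+
+            (These are the parameterised McCormick-style planes assumed by this test,
+            with parameter c = 0.5: for y = x2 * x3,
+                y >= alpha_l x2 + beta_l x3 + gamma_l, where
+                    alpha_l = c x3.lb + ( 1 - c ) x3.ub
+                    beta_l  = c x2.lb + ( 1 - c ) x2.ub
+                    gamma_l = -c x2.lb x3.lb - ( 1 - c ) x2.ub x3.ub
+            and the upper plane has the same form with x3.lb and x3.ub swapped in
+            alpha and gamma. Plugging in x2 : [-1, 6] and x3 : [-1, 3] gives the
+            coefficients above.)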
+ + x4.lb = 1 ( x0 - 2x1 ) + 2.5 ( x0 + x1 ) - 9.5 = 3.5 x0 + 0.5 x1 - 9.5 : [-7, -2] + x4.ub = 1 ( x0 - 2x1 ) + 2.5 ( x0 + x1 ) + 4.5 = 3.5 x0 + 0.5 x1 + 4.5 : [7, 12] + x4 range: [-7, 12] + + Layer 3: + + x5.lb = -1 ( 3.5 x0 + 0.5 x1 + 4.5 ) = -3.5 x0 - 0.5 x1 - 4.5 : [-12, 0] + x5.ub = -1 ( 3.5 x0 + 0.5 x1 - 9.5 ) = -3.5 x0 - 0.5 x1 + 9.5 : [2, 7] + x5 range: [-12, 7] + */ + + List expectedBounds( { Tightening( 2, -1, Tightening::LB ), + Tightening( 2, 6, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 3, Tightening::UB ), + Tightening( 4, -7, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, -12, Tightening::LB ), + Tightening( 5, 7, Tightening::UB ) } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_concretize_input_assignment() + { + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + + populateNetwork( nlr ); + + // With ReLUs, Inputs are zeros, only biases count + tableau.nextValues[0] = 0; + tableau.nextValues[1] = 0; + + Map assignment; + + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 4 ) ); + + TS_ASSERT( assignment.size() == 14 ); + TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( assignment[13], 4 ) ); + + // With ReLUs, case 1 + tableau.nextValues[0] = 1; + tableau.nextValues[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 1 ) ); + + TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( assignment[13], 1 ) ); + + // With ReLUs, case 2 + tableau.nextValues[0] = 1; + tableau.nextValues[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 0 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 0 ) ); + + TS_ASSERT( FloatUtils::areEqual( assignment[12], 0 ) ); + TS_ASSERT( FloatUtils::areEqual( assignment[13], 0 ) ); + } + + void test_obtain_bound_from_ipq() + { + NLR::NetworkLevelReasoner nlr; + populateNetwork( nlr ); + + Query query; + query.setNumberOfVariables( 14 ); // Initialize the bounds query.setLowerBound( 0, -1 ); diff --git a/src/nlr/tests/Test_PMNR.h b/src/nlr/tests/Test_PMNR.h new file mode 100644 index 0000000000..428ecdf515 --- /dev/null +++ b/src/nlr/tests/Test_PMNR.h @@ -0,0 +1,1300 @@ +/********************* */ +/*! \file Test_PMNR.h + ** \verbatim + ** Top contributors (to current version): + ** Guy Katz, Andrew Wu, Ido Shmuel + ** This file is part of the Marabou project. + ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS + ** in the top-level source directory) and their institutional affiliations. + ** All rights reserved. 
See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** [[ Add lengthier description here ]] + +**/ + +#include "../../engine/tests/MockTableau.h" // TODO: fix this +#include "DeepPolySoftmaxElement.h" +#include "FloatUtils.h" +#include "Layer.h" +#include "NetworkLevelReasoner.h" +#include "Options.h" +#include "Query.h" +#include "Tightening.h" +#include "Vector.h" + +#include + +class MockForNetworkLevelReasoner +{ +public: +}; + +class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite +{ +public: + MockForNetworkLevelReasoner *mock; + + void setUp() + { + TS_ASSERT( mock = new MockForNetworkLevelReasoner ); + } + + void tearDown() + { + TS_ASSERT_THROWS_NOTHING( delete mock ); + } + + void populateNetworkWithAbsAndRelu( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -5 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + // Mark the ReLU sources + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 14 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + 
tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + } + + void populateNetworkWithRoundAndSign( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Round sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + // Mark the Sign sources + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 14 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + 
tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + } + + void populateNetworkWithLeakyReluAndSigmoid( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + // Mark the Sigmoid sources + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 14 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + } + + void populateNetworkWithSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau 
) + { + /* + a + x d + b f + y e + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::MAX, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Softmax sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + // Mark the Max sources + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkWithReluAndBilinear( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + a + x d + b f + y e + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 
1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + // Mark the Bilinear sources + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void test_backward_abs_and_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkWithAbsAndRelu( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB 
), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, -5, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, -5, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, -14, Tightening::LB ), Tightening( 13, 26.25, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 10, 0, Tightening::LB ), + Tightening( 11, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_backward_round_and_sign() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkWithRoundAndSign( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 6, -5, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -6, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -5.5, Tightening::LB ), Tightening( 9, 7.5, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_backward_leaky_relu_and_sigmoid() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + 
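+        // Same flow as the backward tests above: run DeepPoly first, then backward LP
+        // relaxation propagation under "backward-converge", and keep only the
+        // tightenings not already implied by the DeepPoly bounds.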
+ NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkWithLeakyReluAndSigmoid( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 6, -5, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -6, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + + Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9997, Tightening::UB ), + Tightening( 11, 0.0180, Tightening::LB ), Tightening( 11, 0.9975, Tightening::UB ), + + Tightening( 12, 0.0025, Tightening::LB ), Tightening( 12, 0.9997, Tightening::UB ), + Tightening( 13, 0.0564, Tightening::LB ), Tightening( 13, 3.9922, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 6, -0.5, Tightening::LB ), + Tightening( 7, -0.1, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_backward_softmax_and_max() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkWithSoftmaxAndMax( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + + Tightening( 5, 0.0066, Tightening::LB ), Tightening( 5, 0.9517, Tightening::UB ), + Tightening( 6, 0.0007, Tightening::LB ), Tightening( 6, 0.9909, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), + + Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), + Tightening( 9, 0.3192, Tightening::LB ), Tightening( 9, 2.9819, Tightening::UB ), + + Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), + + Tightening( 11, -2.9819, Tightening::LB ), Tightening( 
11, -0.3192, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_backward_relu_and_bilinear() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkWithReluAndBilinear( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + + Tightening( 10, -7, Tightening::LB ), Tightening( 10, 35, Tightening::UB ), + + Tightening( 11, -35, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_pmnr_abs_and_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkWithAbsAndRelu( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, 
Tightening::UB ), + + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, -5, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, -5, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, -14, Tightening::LB ), Tightening( 13, 26.25, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Invoke PMNR for neuron selection. + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 10, 0, Tightening::LB ), + Tightening( 11, 0, Tightening::LB ), + + Tightening( 12, 0, Tightening::LB ), + Tightening( 13, 0, Tightening::LB ), + Tightening( 13, 26, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + List> infeasibleBranches( {} ); + List> expectedInfeasibleBranches( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) ); + TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) ); + } + + void test_pmnr_round_and_sign() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkWithRoundAndSign( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 6, -5, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -6, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -5.5, Tightening::LB ), Tightening( 9, 7.5, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Invoke PMNR for neuron selection. 
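+        // The DeepPoly tightenings computed above are first written back into the
+        // tableau via updateTableau(), so the backward-pmnr pass starts from the
+        // already-tightened bounds. lpRelaxationPropagation() then runs the PMNR
+        // analysis; its new tightenings are compared after redundancy filtering,
+        // and any branches it proves infeasible are collected afterwards through
+        // getInfeasibleBranches().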
+ TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { Tightening( 9, -4.75, Tightening::LB ), Tightening( 9, 6.75, Tightening::UB ) } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + List> infeasibleBranches( {} ); + List> expectedInfeasibleBranches( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) ); + TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) ); + } + + void test_pmnr_leaky_relu_and_sigmoid() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkWithLeakyReluAndSigmoid( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 6, -5, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -6, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + + Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9997, Tightening::UB ), + Tightening( 11, 0.0180, Tightening::LB ), Tightening( 11, 0.9975, Tightening::UB ), + + Tightening( 12, 0.0025, Tightening::LB ), Tightening( 12, 0.9997, Tightening::UB ), + Tightening( 13, 0.0564, Tightening::LB ), Tightening( 13, 3.9922, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Invoke PMNR for neuron selection. 
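+        // As in the backward-converge test on this network above, the expected
+        // result recovers the tightenings x6 >= -0.5 and x7 >= -0.1; in addition,
+        // PMNR is expected to tighten the downstream neurons x8 through x13
+        // (see expectedBounds2 below).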
+ TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 6, -0.5, Tightening::LB ), + Tightening( 7, -0.1, Tightening::LB ), + Tightening( 8, -1.5, Tightening::LB ), + Tightening( 8, 7.1, Tightening::UB ), + Tightening( 9, -1.1, Tightening::LB ), + Tightening( 9, 5.1, Tightening::UB ), + Tightening( 10, 0.1541, Tightening::LB ), + Tightening( 10, 0.9992, Tightening::UB ), + Tightening( 11, 0.2422, Tightening::LB ), + Tightening( 11, 0.9942, Tightening::UB ), + Tightening( 12, 0.1541, Tightening::LB ), + Tightening( 12, 0.9992, Tightening::UB ), + Tightening( 13, 0.8827, Tightening::LB ), + Tightening( 13, 3.9817, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + List> infeasibleBranches( {} ); + List> expectedInfeasibleBranches( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) ); + TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) ); + } + + void test_pmnr_softmax_and_max() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkWithSoftmaxAndMax( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + + Tightening( 5, 0.0066, Tightening::LB ), Tightening( 5, 0.9517, Tightening::UB ), + Tightening( 6, 0.0007, Tightening::LB ), Tightening( 6, 0.9909, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), + + Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), + Tightening( 9, 0.3192, Tightening::LB ), Tightening( 9, 2.9819, Tightening::UB ), + + Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), + + Tightening( 11, -2.9819, Tightening::LB ), Tightening( 11, -0.3192, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Invoke PMNR for neuron selection. 
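+        // For this network only x8 is expected to be tightened, from
+        // [-0.7225, 1.9403] to [-0.6812, 1.6790]; any other tightenings are at
+        // best as tight as the DeepPoly bounds and are filtered out by
+        // removeRedundancies().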
+ TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 8, -0.6812, Tightening::LB ), + Tightening( 8, 1.6790, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + List> infeasibleBranches( {} ); + List> expectedInfeasibleBranches( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) ); + TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) ); + } + + void test_pmnr_relu_and_bilinear() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkWithReluAndBilinear( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + + Tightening( 10, -7, Tightening::LB ), Tightening( 10, 35, Tightening::UB ), + + Tightening( 11, -35, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Invoke PMNR for neuron selection. 
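+        // As with the backward-converge variant on this ReLU/Bilinear network,
+        // no strictly tighter bounds are expected here: expectedBounds2 is empty
+        // after redundancy filtering, and no infeasible branches are reported.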
+ TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( newBounds, bounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + List> infeasibleBranches( {} ); + List> expectedInfeasibleBranches( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) ); + TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) ); + } + + bool boundsEqual( const List &bounds, const List &expectedBounds ) + { + if ( bounds.size() != expectedBounds.size() ) + return false; + + bool allFound = true; + for ( const auto &bound : bounds ) + { + bool currentFound = false; + for ( const auto &expectedBound : expectedBounds ) + { + currentFound |= + ( bound._type == expectedBound._type && + bound._variable == expectedBound._variable && + FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) ); + } + allFound &= currentFound; + } + return allFound; + } + + bool infeasibleBranchesEqual( + const List> &infeasibleBranches, + const List> &expectedInfeasibleBranches ) + { + if ( infeasibleBranches.size() != expectedInfeasibleBranches.size() ) + return false; + + bool allFound = true; + for ( const auto &neuronToBranchIndex : infeasibleBranches ) + { + bool currentFound = false; + for ( const auto &expectedNeuronToBranchIndex : expectedInfeasibleBranches ) + { + currentFound |= + neuronToBranchIndexEqual( neuronToBranchIndex, expectedNeuronToBranchIndex ); + } + allFound &= currentFound; + } + return allFound; + } + + bool + neuronToBranchIndexEqual( const Map &neuronToBranchIndex, + const Map &expectedneuronToBranchIndex ) + { + if ( neuronToBranchIndex.size() != expectedneuronToBranchIndex.size() ) + return false; + + bool allFound = true; + for ( const auto &pair : neuronToBranchIndex ) + { + bool currentFound = false; + for ( const auto &expectedPair : expectedneuronToBranchIndex ) + { + currentFound |= ( pair.first._layer == expectedPair.first._layer && + pair.first._neuron == expectedPair.first._neuron && + pair.second == expectedPair.second ); + } + allFound &= currentFound; + } + return allFound; + } + + // Create list of all tightenings in bounds for which there is no bound which is + // at least as tight. + List removeRedundancies( const List &bounds ) + { + List minimalBounds; + unsigned i = 0; + for ( const auto &bound : bounds ) + { + bool foundTighter = false; + unsigned j = 0; + for ( const auto &otherBound : bounds ) + { + if ( i < j ) + { + foundTighter |= + ( bound._type == otherBound._type && + bound._variable == otherBound._variable && + ( ( bound._type == Tightening::LB && + FloatUtils::lte( bound._value, otherBound._value, 0.0001 ) ) || + ( bound._type == Tightening::UB && + FloatUtils::gte( bound._value, otherBound._value, 0.0001 ) ) ) ); + } + ++j; + } + if ( !foundTighter ) + { + minimalBounds.append( bound ); + } + ++i; + } + return minimalBounds; + } + + // Create list of all tightenings in bounds for which there is no bound in newBounds + // or in previousBounds which is at least as tight. 
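+    // For example, a new lower bound x3 >= 2 is dropped when newBounds (later in
+    // the list) or previousBounds already contains a lower bound x3 >= 3, or an
+    // equally tight x3 >= 2, for the same variable.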
+ List removeRedundancies( const List &newBounds, + const List &bounds ) + { + List minimalBounds; + unsigned i = 0; + for ( const auto &bound : newBounds ) + { + bool foundTighter = false; + unsigned j = 0; + for ( const auto &otherBound : newBounds ) + { + if ( i < j ) + { + foundTighter |= + ( bound._type == otherBound._type && + bound._variable == otherBound._variable && + ( ( bound._type == Tightening::LB && + FloatUtils::lte( bound._value, otherBound._value, 0.0001 ) ) || + ( bound._type == Tightening::UB && + FloatUtils::gte( bound._value, otherBound._value, 0.0001 ) ) ) ); + } + ++j; + } + for ( const auto &otherBound : bounds ) + { + foundTighter |= + ( bound._type == otherBound._type && bound._variable == otherBound._variable && + ( ( bound._type == Tightening::LB && + FloatUtils::lte( bound._value, otherBound._value, 0.0001 ) ) || + ( bound._type == Tightening::UB && + FloatUtils::gte( bound._value, otherBound._value, 0.0001 ) ) ) ); + } + if ( !foundTighter ) + { + minimalBounds.append( bound ); + } + ++i; + } + return minimalBounds; + } + + void updateTableau( MockTableau &tableau, List &tightenings ) + { + for ( const auto &tightening : tightenings ) + { + if ( tightening._type == Tightening::LB ) + { + tableau.setLowerBound( tightening._variable, tightening._value ); + } + + if ( tightening._type == Tightening::UB ) + { + tableau.setUpperBound( tightening._variable, tightening._value ); + } + } + } +}; diff --git a/src/nlr/tests/Test_PMNRSelection.h b/src/nlr/tests/Test_PMNRSelection.h new file mode 100644 index 0000000000..e8dcce6d31 --- /dev/null +++ b/src/nlr/tests/Test_PMNRSelection.h @@ -0,0 +1,9957 @@ +/********************* */ +/*! \file Test_PMNRSelection.h + ** \verbatim + ** Top contributors (to current version): + ** Guy Katz, Andrew Wu, Ido Shmuel + ** This file is part of the Marabou project. + ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS + ** in the top-level source directory) and their institutional affiliations. + ** All rights reserved. 
See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** [[ Add lengthier description here ]] + +**/ + +#include "../../engine/tests/MockTableau.h" // TODO: fix this +#include "FloatUtils.h" +#include "Layer.h" +#include "NetworkLevelReasoner.h" +#include "Options.h" +#include "Query.h" +#include "Tightening.h" +#include "Vector.h" + +#include + +class MockForNetworkLevelReasoner +{ +public: +}; + +class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite +{ +public: + MockForNetworkLevelReasoner *mock; + + void setUp() + { + TS_ASSERT( mock = new MockForNetworkLevelReasoner ); + } + + void tearDown() + { + TS_ASSERT_THROWS_NOTHING( delete mock ); + } + + void populateNetworkSBTRelu( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + 2 R 1 + x0 --- x2 ---> x4 --- x6 + \ / / + 1 \ / / + \/ -1 / + /\ / + 3 / \ / + / \ R / + x1 --- x3 ---> x5 + 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 7 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + } + + void populateNetworkSBTReluResidual1( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + -1 + __________________ + / \ + / 1 R -1 1 R 3 1 + x0 --- x1 ---> x2 --- x3 ---> x4 --- x5 + \ / + \ 3 / + \________________________/ + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 1 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 2, NLR::Layer::RELU, 1 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 4, NLR::Layer::RELU, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 1, 5 ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 4, 0, 5, 0, 3 ); + nlr.setWeight( 0, 0, 3, 0, -1 ); + nlr.setWeight( 1, 0, 5, 0, 3 ); + + nlr.setBias( 3, 0, 1 ); + nlr.setBias( 5, 0, 1 ); + + // Mark the ReLU sources + 
nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 3, 0, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 5 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 6 ); + tableau.setLowerBound( 1, -large ); + tableau.setUpperBound( 1, large ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + } + + void populateNetworkSBTReluResidual2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + -1 + __________________ + / \ + / 1 R -1 1 R 3 1 1 + x0 --- x1 ---> x2 --- x3 ---> x4 --- x5 --- x6 + \ / + \ 1 / + \_______________________________/ + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 1 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 2, NLR::Layer::RELU, 1 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 4, NLR::Layer::RELU, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 6, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 6; ++i ) + nlr.addLayerDependency( i - 1, i ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 0, 5 ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 4, 0, 5, 0, 3 ); + nlr.setWeight( 0, 0, 3, 0, -1 ); + nlr.setWeight( 0, 0, 5, 0, 1 ); + nlr.setWeight( 5, 0, 6, 0, 1 ); + + nlr.setBias( 3, 0, 1 ); + nlr.setBias( 5, 0, 1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 3, 0, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 7 ); + tableau.setLowerBound( 1, -large ); + tableau.setUpperBound( 1, large ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + } + + void populateNetworkSBTReluReindex( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 1 1 1 + x0 --- x2 x5 --- x6 x9 --- x10 + \ /\ /\ / \ / \ / + 1 \ / R\ /-1\ / R \ / 1 \ / + \/ \/ \/ \/ \/ + /\ /\ /\ /\ /\ + 1 / \ R/ \ 1/ \ R / \ 1 / \ + / \/ \/ \ / \ / 0 \ + x1 --- x3 x4 --- x7 x8 --- x11 + -1 1 + + The example described in Fig. 
3 of + https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 0 ); + + nlr.setBias( 5, 0, 1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + + nlr.addActivationSource( 3, 0, 4, 1 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 8 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkSBTAbsoluteValue( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + 2 R 1 + x0 --- x2 ---> x4 --- x6 + \ / / + 1 \ / / + \/ -1 / + /\ / + 3 / \ / + / \ R / + x1 --- x3 ---> x5 + 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark 
the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 7 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + } + + void populateNetworkSBTSign( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + 2 R 1 + x0 --- x2 ---> x4 --- x6 + \ / / + 1 \ / / + \/ -1 / + /\ / + 3 / \ / + / \ R / + x1 --- x3 ---> x5 + 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 7 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + } + + void populateNetworkSBTLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 LR 1 LR 1 1 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 1 \ / 0 \ / + \/ \/ \/ + /\ /\ /\ + 1 / \ 1 / \ 1 / \ + / \ LR / \ LR / 1 \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + -1 -1 + + The example described in Fig. 
3 of + https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + using LeakyReLU activation instead of ReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.2 ); + nlr.getLayer( 4 )->setAlpha( 0.2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, 1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 0 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + + nlr.setBias( 5, 0, 1 ); + + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkSBTSigmoidsAndRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 S 1 Rd + x0 --- x2 ---> x4 --- x6 --- x8 + \ / \ / + 1 \ / 1 \ / + \/ \/ + /\ /\ + 1 / \ 1 / \ + / \ S / \ Rd + x1 --- x3 ---> x5 --- x7 --- x9 + -1 -1 + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 4; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // 
Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, 1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + // Mark the Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Mark the Round sources + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 10 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + } + + void populateNetworkSBTMax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 R Max 2 + x0 --- x2 ---> x4 --- x6 ---> x7 + \ / / + 1 \ / / + \/ / + /\ / + 1 / \ / + / \ R / + x1 --- x3 ---> x5 + -1 + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::MAX, 1 ); + nlr.addLayer( 4, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 4; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + nlr.setWeight( 3, 0, 4, 0, 2 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Mark the Max sources + nlr.addActivationSource( 2, 0, 3, 0 ); + nlr.addActivationSource( 2, 1, 3, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 7 ); + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 8 ); + tableau.setLowerBound( 2, -large ); + 
tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + } + + void populateNetworkSBTSoftmax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + x0 x3 S x6 + + x1 x4 S x7 + + x2 x5 S x8 + + x3 = x0 - x1 + x2 + 1 + x4 = -x0 + x1 + x2 + 2 + x5 = -x0 - x1 - x2 + 3 + + x6 x7 x8 = softmax(x3, x4, x5) + + x9 = x6 + x7 + x8 + x10 = - x6 - x7 - x8 + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -1 ); + nlr.setWeight( 0, 0, 1, 2, -1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 2, -1 ); + nlr.setWeight( 0, 2, 1, 0, 1 ); + nlr.setWeight( 0, 2, 1, 1, 1 ); + nlr.setWeight( 0, 2, 1, 2, -1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 1, 1, 2 ); + nlr.setBias( 1, 2, 3 ); + + // Mark the Softmax sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 8 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 11 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + } + + void populateNetworkSBTSoftmax2( NLR::NetworkLevelReasoner &nlr, 
MockTableau &tableau ) + { + /* + + x0 x3 S x8 + + x1 x4 S x9 + + x2 x5 S x10 + + x6 S x11 + + x7 S x12 + + x3 = x0 - x1 + x2 + 1 + x4 = -x0 + x1 + x2 + 2 + x5 = -x0 - x1 - x2 + 3 + x6 = -x0 - x1 - x2 + 2 + x7 = -x0 - x1 - x2 + 1 + + x8 x10 x12 = softmax(x3, x5, x7) + + x9 x11 = softmax(x4, x6) + + x13 = x8 + x10 + x12 + x14 = -x8 - x10 - x12 + x15 = x9 + x11 + x16 = -x9 - x11 + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 5 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 5 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 4 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -1 ); + nlr.setWeight( 0, 0, 1, 2, -1 ); + nlr.setWeight( 0, 0, 1, 3, -1 ); + nlr.setWeight( 0, 0, 1, 4, -1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 2, -1 ); + nlr.setWeight( 0, 1, 1, 3, -1 ); + nlr.setWeight( 0, 1, 1, 4, -1 ); + nlr.setWeight( 0, 2, 1, 0, 1 ); + nlr.setWeight( 0, 2, 1, 1, 1 ); + nlr.setWeight( 0, 2, 1, 2, -1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + nlr.setWeight( 0, 2, 1, 4, -1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 4, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + nlr.setWeight( 2, 4, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 2, 1 ); + nlr.setWeight( 2, 3, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 3, -1 ); + nlr.setWeight( 2, 3, 3, 3, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 1, 1, 2 ); + nlr.setBias( 1, 2, 3 ); + nlr.setBias( 1, 3, 2 ); + nlr.setBias( 1, 4, 1 ); + + // Mark the Softmax sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 4, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 4, 2, 2 ); + nlr.addActivationSource( 1, 0, 2, 4 ); + nlr.addActivationSource( 1, 2, 2, 4 ); + nlr.addActivationSource( 1, 4, 2, 4 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 3 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 4 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 4 ), 12 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 13 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 3 ), 16 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 17 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, 
-large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + } + + void populateNetworkSBTBilinear( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + x0 x2 + x x4 -- x5 + x1 x3 + + x2 = x0 - 2 * x1 + x3 = x0 + x1 + x4 = -x5 + + x4 = x2 * x3 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -2 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, -1 ); + + // Mark the Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 5 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 6 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + } + + void test_symbolic_bound_maps_relus_all_active() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Both ReLUs active, bound survive through activations: + + x2 <= x4 <= x2 + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] + + 
x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + => x2 - x3 <= x6 <= x2 - x3 + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ + + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (RELU): + x2 <= x4 <= x2 + x3 <= x5 <= x3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using x2 <= x4 <= x2, x3 <= x5 <= x3: + x2 - x3 <= x6 <= x2 - x3 + + Layer 0: + Using x2 = 2x0 + 3x1, x3 = x0 + x1: + x0 + 2x1 <= x6 <= x0 + 2x1 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 1, 1 } ), + Vector( { 1, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 1, 2 } ), + Vector( { 1, 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + + // Non-fixed activation neurons: None. 
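+        // (Both ReLU source neurons have strictly positive lower bounds here,
+        // x2 in [11, 27] and x3 in [5, 11], so both activations are fixed active.)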
+ compareNonfixedNeurons( nlr, Set( {} ) ); + } + + void test_symbolic_bound_maps_relus_active_and_inactive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 - 30 + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive, bounds get zeroed + Second ReLU is active, bounds surive the activation + + 0 <= x4 <= 0 + x4.lb = 0 + x4.ub = 0 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + ==> -x3 <= x6 <= -x3 + x6.lb = -x0 - x1 : [-11, -5] + x6.ub = -x0 - x1 : [-11, -5] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (RELU): + 0 <= x4 <= 0 + x3 <= x5 <= x3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using x2 <= x4 <= x2, x3 <= x5 <= x3: + -x3 <= x6 <= -x3 + + Layer 0: + Using x3 = x0 + x1: + -x0 - x1 <= x6 <= -x0 - x1 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, -1 } ), + Vector( { 0, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -1, -1 } ), + Vector( { -1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + + // Non-fixed activation neurons: None. 
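+        // (x2 in [-19, -3] fixes x4 in the inactive phase and x3 in [5, 11] fixes
+        // x5 in the active phase, so no activation neuron is left non-fixed.)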
+ compareNonfixedNeurons( nlr, Set( {} ) ); + } + + void test_symbolic_bound_maps_relus_active_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 - 15 + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is undecided, bound is concretized. 12 = ub > -lb = 4, using ReLU lower + coefficient of 1. Upper coefficient: 12/(12--4) = 12/16 = 0.75 + Second ReLU is active, bounds surive the activation + + x4 range: [-4, 12] + x2 <= x4 <= 0.75 x2 + 3 + x4.lb = 2x0 + 3x1 - 15 + x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + ==> x2 - x3 <= x6 <= 0.75x2 - x3 + 3 + x6.lb = x0 + 2x1 - 15 + x6.ub = 0.5x0 + 1.25x1 - 8.25 + + x6 range: [4 + 2 - 15 = -9, 3 + 6.25 - 8.25 = 1] = [-9, 1] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -4, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -9, Tightening::LB ), + Tightening( 6, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (RELU): + x2 <= x4 <= 0.75x2 + 3 + x3 <= x5 <= x3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using x2 <= x4 <= x2, x3 <= x5 <= x3: + x2 - x3 <= x6 <= 0.75x2 - x3 + 3 + + Layer 0: + Using x2 = 2x0 + 3x1, x3 = x0 + x1: + x0 + 2x1 - 15 <= x6 <= 0.5x0 + 1.25x1 - 8.25 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 1, 1 } ), + Vector( { 0.75, 1 } ), + Vector( { 0, 0 } ), + Vector( { 3, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 1, -1 } ), + Vector( { 0.75, -1 } ), + Vector( { 0 } ), + Vector( { 3 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 1, 2 } ), + Vector( { 0.5, 1.25 } ), + Vector( { -15 } ), + Vector( { -8.25 } ) ); + + // Non-fixed activation neurons: x4 (RELU). 
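+        // (x2 in [-4, 12] straddles zero, so x4 remains non-fixed and is relaxed
+        // with lower coefficient 1 and upper coefficient ub/(ub - lb) = 0.75, as
+        // derived above; x3 in [5, 11] keeps x5 fixed active.)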
+ compareNonfixedNeurons( nlr, Set( { NLR::NeuronIndex( 2, 0 ) } ) ); + } + + void test_bbps_selection_relus_active_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + // Using branching point (x2, 0) for x4 (RELU). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 2, 0 ), + std::pair( { NLR::NeuronIndex( 1, 0 ), 0 } ) ); + + /* + Lower branch symbolic bounds: 0 <= x4 <= 0. + Upper branch symbolic bounds: x2 <= x4 <= x2. + */ + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 2, 0 ), + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + /* Calculating BBPS-based PMNR score of x4: + Symbolic bounds of output layer in terms of Layer 2: x4 - x5 <= x6 <= x4 - x5. + Concretizing x5: x4 - 11 <= x6 <= x4 - 5. + + Lower branch, using x2: [-4, 0], 0 <= x4 <= 0: + Output symbolic bounds -11 <= x6 <= -5. + Upper branch, using x2: [0, 12], x2 <= x4 <= x2: + Output symbolic bounds x2 - 11 <= x6 <= x2 - 5. + + Summing over all branches: + Lower symbolic expression: x2 - 22 >= -26. + Upper symbolic expression: x2 - 10 <= 2. + + Final score = ( 2 - (-26) ) / 2 = 14. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 14 ); + } + + void test_symbolic_bound_maps_relus_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
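+        // (Same setup as the "active and not fixed" test above, except that the
+        // variable elimination below pins x2 to -3, so the first ReLU ends up
+        // fixed inactive despite the bias.)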
+ nlr.setBias( 1, 0, -15 ); + + // However, one of the ReLU's variables has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = -3 + x2 is eliminated, everything set to -3 + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive (set externally), bounds get zeroed + Second ReLU is active, bounds surive the activation + + 0 <= x4 <= 0 + x4.lb = 0 + x4.ub = 0 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + ==> -x3 <= x6 <= -x3 + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (RELU): + 0 <= x4 <= 0 + x3 <= x5 <= x3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using x2 <= x4 <= x2, x3 <= x5 <= x3: + -x3 <= x6 <= -x3 + + Layer 0: + Using x3 = x0 + x1: + -x0 - x1 <= x6 <= -x0 - x1 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, -1 } ), + Vector( { 0, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -1, -1 } ), + Vector( { -1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + + // Non-fixed activation neurons: None. + compareNonfixedNeurons( nlr, Set( {} ) ); + } + + void test_symbolic_bound_maps_relu_residual1() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluResidual1( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + /* + Input ranges: + + x0: [-1, 1] + + Layers 1. 2: + + x1 = x0 + x1.lb = x0 : [-1, 1] + x1.ub = x0 : [-1, 1] + + ReLU is undecided, bound is concretized. 1 = ub <= -lb = 1, using ReLU lower + coefficient of 0. 
Upper coefficient: 1/( 1--1 ) = 1/2 = 0.5 + + 0 <= x2 <= 0.5x1 + 0.5 + x2.lb = 0 + x2.ub = 0.5x0 + 0.5 + x2 range: [0, 1] + + Layers 3, 4 (with residual from x0): + + x3 = - x2 - x0 + 1 + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 2] + x3.ub = -1( 0 ) -1x0 + 1 = -x0 + 1 : [0, 2] + x3 range: [-1, 2] + + ReLU is undecided, bound is concretized. 2 = ub > -lb = 1, using ReLU lower + coefficient of 1. Upper coefficient: 2/( 2--1 ) = 2/3. + + x3 <= x4 <= 2/3 x3 + 2/3 + x4.lb = -1.5x0 + 0.5 + x4.ub = 2/3 ( -x0 + 1 ) + 2/3 = -2/3 x0 + 4/3 : [1, 2] + x4 range: [-1, 2] + + Layer 5 (with residual from x1): + + x5 = 3x4 + 3x1 + 1 + x5.lb = 3 ( -1.5x0 + 0.5 ) + 3 ( x0 ) + 1 = -1.5x0 + 2.5 : [1, 4] + x5.ub = 3 ( -2/3 x0 + 4/3 ) + 3 ( x0 ) + 1 = x0 + 5 : [4, 6] + x5 range: [1, 6] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 6, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (RELU): + 0 <= x2 <= 0.5x1 + 0.5 + + Layer 4 (RELU): + x3 <= x4 <= 2/3 x3 + 2/3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 5: + x5 <= x5 <= x5 + + Layer 4: + Using x5 = 3x4 + 3x1 + 1: + 3x4 + 3x1 + 1 <= x5 <= 3x4 + 3x1 + 1 + Concretizing residual using x1 : [-1, 1]: 3x4 - 2 <= x5 <= 3x4 + 4 + + Layer 3: + Using x3 <= x4 <= 2/3 x3 + 2/3: + 3x3 + 3x1 + 1 <= x5 <= 2x3 + 3x1 + 3 + Concretizing residual using x1 : [-1, 1]: 3x3 - 2 <= x5 <= 2x3 + 6 + + Layer 2: + Using x3 = -x2 - x0 + 1: + -3x2 + 3x1 - 3x0 + 4 <= x5 <= -2x2 + 3x1 - 2x0 + 5 + Concretizing residual using x0 : [-1, 1], x1 : [-1, 1]: -3x2 - 2 <= x5 <= -2x2 + 10 + + Layer 1: + Using 0 <= x2 <= 0.5x1 + 0.5: + 1.5x1 - 3x0 + 2.5 <= x5 <= 3x1 - 2x0 + 5 + Concretizing residual using x0 : [-1, 1]: 1.5x1 - 0.5 <= x5 <= 3x1 + 7 + + Layer 0: + Using x1 = x0: + -1.5x0 + 2.5 <= x5 <= x0 + 5 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0 } ), + Vector( { 0.5 } ), + Vector( { 0 } ), + Vector( { 0.5 } ) ); + comparePredecessorSymbolicBounds( nlr, + 4, + Vector( { 1 } ), + Vector( { 0.6667 } ), + Vector( { 0 } ), + Vector( { 0.6667 } ) ); + + compareOutputSymbolicBounds( nlr, + 5, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 3 } ), + Vector( { 3 } ), + Vector( { -2 } ), + Vector( { 4 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 3 } ), + Vector( { 2 } ), + Vector( { -2 } ), + Vector( { 6 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { -3 } ), + Vector( { -2 } ), + Vector( { -2 } ), + Vector( { 10 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 1.5 } ), + Vector( { 3 } ), + Vector( { -0.5 } ), + Vector( { 7 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -1.5 } ), + Vector( { 1 } ), + Vector( { 2.5 } ), + Vector( { 5 } ) ); + + // Non-fixed activation neurons: x2 (RELU), x4 (RELU). 
+ compareNonfixedNeurons( + nlr, Set( { NLR::NeuronIndex( 2, 0 ), NLR::NeuronIndex( 4, 0 ) } ) ); + } + + void test_bbps_selection_relu_residual1() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluResidual1( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + // Using branching point (x1, 0) for x2 (RELU). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 2, 0 ), + std::pair( { NLR::NeuronIndex( 1, 0 ), 0 } ) ); + + // Using branching point (x3, 0) for x4 (RELU). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 4, 0 ), + std::pair( { NLR::NeuronIndex( 3, 0 ), 0 } ) ); + + /* + Lower branch symbolic bounds: 0 <= x2 <= 0. + Upper branch symbolic bounds: x1 <= x2 <= x1. + */ + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 2, 0 ), + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + /* + Lower branch symbolic bounds: 0 <= x4 <= 0. + Upper branch symbolic bounds: x3 <= x4 <= x3. + */ + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 4, 0 ), + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + /* Calculating BBPS-based PMNR score of x2: + Symbolic bounds of output layer in terms of Layer 2: -3x2 - 2 <= x5 <= -2x2 + 10. + + Lower branch, using x1: [-1, 0], 0 <= x4 <= 0: + Output symbolic bounds -2 <= x5 <= 10. + Upper branch, using x1: [0, 1], x2 <= x4 <= x2: + Output symbolic bounds -3x1 - 2 <= x5 <= -2x1 + 10. + + Summing over all branches: + Lower symbolic expression: -3x1 - 4 >= -7. + Upper symbolic expression: -2x1 + 20 <= 22. + + Final score = ( 22 - (-7) ) / 2 = 14.5. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 14.5 ); + + /* Calculating BBPS-based PMNR score of x4: + Symbolic bounds of output layer in terms of Layer 4: 3x4 - 2 <= x5 <= 3x4 + 4. + + Lower branch, using x3: [-1, 0], 0 <= x4 <= 0: + Output symbolic bounds -2 <= x5 <= 4. + Upper branch, using x3: [0, 2], x2 <= x4 <= x2: + Output symbolic bounds 3x3 - 2 <= x5 <= 3x3 + 4. + + Summing over all branches: + Lower symbolic expression: -3x3 - 4 >= -7. + Upper symbolic expression: 3x3 + 8 <= 14. + + Final score = ( 14 - (-7) ) / 2 = 10.5. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 10.5 ); + } + + void test_symbolic_bound_maps_relu_residual2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluResidual2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + /* + Input ranges: + + x0: [-1, 1] + + Layers 1, 2: + + x1 = x0 + x1.lb = x0 : [-1, 1] + x1.ub = x0 : [-1, 1] + + ReLU is undecided, bound is concretized. 1 = ub <= -lb = 1, using ReLU lower + coefficient of 0. 
Upper coefficient: 1/( 1--1 ) = 1/2 = 0.5 + + 0.5 x1 <= x2 <= 0.5x1 + 0.5 + x2.lb = 0 + x2.ub = 0.5x0 + 0.5 + x2 range: [0, 1] + + Layers 3, 4 (with residual from x0): + + x3 = - x2 - x0 + 1 + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 2] + x3.ub = -1( 0 ) -1x0 + 1 = -x0 + 1 : [0, 2] + x3 range: [-1, 2] + + ReLU is undecided, bound is concretized. 2 = ub > -lb = 1, using ReLU lower + coefficient of 1. Upper coefficient: 2/( 2--1 ) = 2/3. + + x3 <= x4 <= 2/3 x3 + 2/3 + x4.lb = -1.5x0 + 0.5 + x4.ub = 2/3 ( -x0 + 1 ) + 2/3 = -2/3 x0 + 4/3 : [1, 2] + x4 range: [-1, 2] + + Layer 5 (with residual from x0): + + x5 = 3x4 + x0 + 1 + x5.lb = 3 ( -1.5x0 + 0.5 ) + 1 ( x0 ) + 1 = -3.5x0 + 2.5 : [-1, 6] + x5.ub = 3 ( -2/3 x0 + 4/3 ) + 1 ( x0 ) + 1 = -x0 + 5 : [4, 6] + x5 range: [-1, 6] + + Layer 6: + x6 = x5 + x6.lb = -3.5x0 + 2.5 : [-1, 6] + x6.ub = -x0 + 5 : [4, 6] + x6 range: [-1, 6] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), + Tightening( 5, 6, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), + Tightening( 6, 6, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (RELU): + 0 <= x2 <= 0.5x1 + 0.5 + + Layer 4 (RELU): + x3 <= x4 <= 2/3 x3 + 2/3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 6: + x6 <= x6 <= x6 + + Layer 5: + Using x6 = x5: + x5 <= x6 <= x5 + + Layer 4: + Using x5 = 3x4 + x0 + 1: + 3x4 + x0 + 1 <= x6 <= 3x4 + x0 + 1 + Concretizing residual using x0 : [-1, 1]: 3x4 <= x6 <= 3x4 + 2 + + Layer 3: + Using x3 <= x4 <= 2/3 x3 + 2/3: + 3x3 + x0 + 1 <= x6 <= 2x3 + x0 + 3 + Concretizing residual using x0 : [-1, 1]: 3x3 <= x6 <= 2x3 + 4 + + Layer 2: + Using x3 = -x2 - x0 + 1: + -3x2 - 2x0 + 4 <= x6 <= -2x2 - x0 + 5 + Concretizing residual using x0 : [-1, 1]: -3x2 + 2 <= x6 <= -2x2 + 6 + + Layer 1: + Using 0 <= x2 <= 0.5x1 + 0.5: + -1.5x1 - 2x0 + 2.5 <= x6 <= -x0 + 5 + Concretizing residual using x0 : [-1, 1]: -1.5x1 + 0.5 <= x6 <= 6 + + Layer 0: + Using x1 = x0: + -3.5x0 + 2.5 <= x6 <= -x0 + 5 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0 } ), + Vector( { 0.5 } ), + Vector( { 0 } ), + Vector( { 0.5 } ) ); + comparePredecessorSymbolicBounds( nlr, + 4, + Vector( { 1 } ), + Vector( { 0.6667 } ), + Vector( { 0 } ), + Vector( { 0.6667 } ) ); + + compareOutputSymbolicBounds( nlr, + 6, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 5, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 3 } ), + Vector( { 3 } ), + Vector( { 0 } ), + Vector( { 2 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 3 } ), + Vector( { 2 } ), + Vector( { 0 } ), + Vector( { 4 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { -3 } ), + Vector( { -2 } ), + Vector( { 2 } ), + Vector( { 6 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { -1.5 } ), + Vector( { 0 } ), + Vector( { 0.5 } ), + Vector( { 6 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + 
Vector( { -3.5 } ), + Vector( { -1 } ), + Vector( { 2.5 } ), + Vector( { 5 } ) ); + + // Non-fixed activation neurons: x2 (RELU), x4 (RELU). + compareNonfixedNeurons( + nlr, Set( { NLR::NeuronIndex( 2, 0 ), NLR::NeuronIndex( 4, 0 ) } ) ); + } + + void test_bbps_selection_relu_residual2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluResidual2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + // Using branching point (x1, 0) for x2 (RELU). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 2, 0 ), + std::pair( { NLR::NeuronIndex( 1, 0 ), 0 } ) ); + + // Using branching point (x3, 0) for x4 (RELU). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 4, 0 ), + std::pair( { NLR::NeuronIndex( 3, 0 ), 0 } ) ); + + /* + Lower branch symbolic bounds: 0 <= x2 <= 0. + Upper branch symbolic bounds: x1 <= x2 <= x1. + */ + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 2, 0 ), + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + /* + Lower branch symbolic bounds: 0 <= x4 <= 0. + Upper branch symbolic bounds: x3 <= x4 <= x3. + */ + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 4, 0 ), + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + /* Calculating BBPS-based PMNR score of x2: + Symbolic bounds of output layer in terms of Layer 2: -3x2 + 2 <= x6 <= -2x2 + 6. + + Lower branch, using x1: [-1, 0], 0 <= x4 <= 0: + Output symbolic bounds 2 <= x6 <= 6. + Upper branch, using x1: [0, 1], x2 <= x4 <= x2: + Output symbolic bounds -3x1 + 2 <= x6 <= -2x1 + 6. + + Summing over all branches: + Lower symbolic expression: -3x1 + 4 >= 1. + Upper symbolic expression: -2x1 + 12 <= 14. + + Final score = ( 14 - 1 ) / 2 = 6.5. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 6.5 ); + + /* Calculating BBPS-based PMNR score of x4: + Symbolic bounds of output layer in terms of Layer 4: 3x4 <= x6 <= 3x4 + 2. + + Lower branch, using x3: [-1, 0], 0 <= x4 <= 0: + Output symbolic bounds 0 <= x6 <= 2. + Upper branch, using x3: [0, 2], x2 <= x4 <= x2: + Output symbolic bounds 3x3 <= x6 <= 3x3 + 2. + + Summing over all branches: + Lower symbolic expression: 3x3 >= -3. + Upper symbolic expression: 3x3 + 4 <= 10. + + Final score = ( 10 - (-3) ) / 2 = 6.5. 
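+
+           A minimal Python double-check of the arithmetic above (a sketch; it assumes
+           the score is half the gap between the maximum of the summed upper-branch
+           upper bounds and the minimum of the summed lower-branch lower bounds over
+           the source neuron's range):
+           ---
+           # Summed bounds of x6 over both branches, as linear functions of x3 in [-1, 2].
+           # Both expressions are linear, so the extrema are attained at the endpoints.
+           lower = lambda x3: 0 + 3 * x3
+           upper = lambda x3: 2 + ( 3 * x3 + 2 )
+           print( ( max( upper( -1 ), upper( 2 ) ) - min( lower( -1 ), lower( 2 ) ) ) / 2 )  # 6.5
+           ---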
+ */ + comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 6.5 ); + } + + void test_symbolic_bound_maps_relu_reindex() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluReindex( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 1] + + Layers 1, 2: + + x2 = x0 + x1 + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3 = x0 - x1 + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] + + Both ReLUs are undecided, bounds are concretized. 2 = ub <= -lb = 2, using ReLU lower + coefficient of 0. Upper coefficient: 2/( 2--2 ) = 2/4 = 0.5 + + 0 <= x4 <= 0.5x2 + 1 + x4.lb = 0 + x4.ub = 0.5 ( x0 + x1 ) + 1 = 0.5x0 + 0.5x1 + 1 + x4 range: [0, 2] + + 0 <= x5 <= 0.5x3 + 1 + x5.lb = 0 + x5.ub = 0.5 ( x0 - x1 ) + 1 = 0.5x0 - 0.5x1 + 1 + x5 range: [0, 2] + + Layers 3, 4: + + x6 = x4 + x5 + x6.lb = 1 ( 0 ) + 1 ( 0 ) = 0 : [0, 0] + x6.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) + 1 ( 0.5x0 - 0.5x1 + 1 ) = x0 + 2 : [1, 3] + x6 range: [0, 3] + + x7 = x4 - x5 + x7.lb = 1 ( 0 ) - 1 ( 0.5x0 - 0.5x1 + 1 ) = - 0.5x0 + 0.5x1 - 1 : [-2, 0] + x7.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) - 1 ( 0 ) = 0.5x0 + 0.5x1 + 1 : [0, 2] + x7 range: [-2, 2] + + First ReLU is active, bounds surive the activation + Second ReLUs is undecided, bound is concretized. 2 = ub <= -lb = 2, using ReLU lower + coefficient of 0. Upper coefficient (second ReLU): 2/( 2--2 ) = 2/4 = 0.5 + + x6 <= x8 <= x6 + x8.lb = 0 + x8.ub = x0 + 2 + x8 range: [0, 3] + + 0 <= x9 <= 0.5 x7 + 1 + x9.lb = 0 + x9.ub = 0.5 ( 0.5x0 + 0.5x1 + 1 ) + 1 = 0.25x0 + 0.25x1 + 1.5 + x9 range: [0, 2] + + Layer 5: + x10 = x8 + x9 + 1 + x10.lb = 1 ( 0 ) + 1 ( 0 ) + 1 = 1 : [1, 1] + x10.ub = 1 ( x6 ) + 1 ( 0.5 x7 + 1 ) + 1 = 1 ( x4 + x5 ) + 1 ( 0.5 x4 - 0.5x5 + 1 ) + 1 + = 1.5x4 + 0.5x5 + 2 <= 0.75x2 + 0.25x3 + 4 = x0 + 0.5x1 + 4 : [2.5, 5.5] + x10 range: [1, 5.5] + + x11 = x9 + x11.lb = 0 + x11.ub = 0.25x0 + 0.25x1 + 1.5 + x11 range: [0, 2] + + */ + + List expectedBounds( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, 1, Tightening::LB ), Tightening( 10, 5.5, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 2, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (RELU): + 0 <= x4 <= 0.5x2 + 1 + 0 <= x5 <= 0.5x3 + 1 + + Layer 4 (RELU): + x6 <= x8 <= x6 + 0 <= x9 <= 0.5 x7 + 1 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 
5: + x10 <= x10 <= x10 + x11 <= x11 <= x11 + + Layer 4: + Using x10 = x8 + x9 + 1, x11 = x9: + x8 + x9 + 1 <= x10 <= x8 + x9 + 1 + x9 <= x11 <= x9 + + Layer 3: + Using x6 <= x8 <= x6, 0 <= x9 <= 0.5 x7 + 1: + x6 + 1 <= x10 <= x6 + 0.5 x7 + 2 + 0 <= x11 <= 0.5 x7 + 1 + + Layer 2: + Using x6 = x4 + x5, x7 = x4 - x5: + x4 + x5 + 1 <= x10 <= 1.5x4 + 0.5x5 + 2 + 0 <= x11 <= 0.5x4 - 0.5x5 + 1 + + Layer 1: + Using 0 <= x4 <= 0.5x2 + 1, 0 <= x5 <= 0.5x3 + 1: + 1 <= x10 <= 0.75x2 + 0.25x3 + 4 + 0 <= x11 <= 0.25x2 + 1.5 + + Layer 0: + Using x2 = x0 + x1, x3 = x0 - x1: + 1 <= x10 <= x0 + 0.5x1 + 4 + 0 <= x11 <= 0.25x2 + 0.25x3 + 1.5 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0, 0 } ), + Vector( { 0.5, 0.5 } ), + Vector( { 0, 0 } ), + Vector( { 1, 1 } ) ); + + comparePredecessorSymbolicBounds( nlr, + 4, + Vector( { 0, 1 } ), + Vector( { 0.5, 1 } ), + Vector( { 0, 0 } ), + Vector( { 1, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 5, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 1, 1, 1, 0 } ), + Vector( { 1, 1, 1, 0 } ), + Vector( { 1, 0 } ), + Vector( { 1, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1, 0, 0, 0 } ), + Vector( { 1, 0, 0.5, 0.5 } ), + Vector( { 1, 0 } ), + Vector( { 2, 1 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, 0, 1, 0 } ), + Vector( { 0.5, -0.5, 1.5, 0.5 } ), + Vector( { 1, 0 } ), + Vector( { 2, 1 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, 0, 0, 0 } ), + Vector( { 0.75, 0.25, 0.25, 0 } ), + Vector( { 1, 0 } ), + Vector( { 4, 1.5 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0, 0, 0, 0 } ), + Vector( { 1, 0.25, 0.5, 0.25 } ), + Vector( { 1, 0 } ), + Vector( { 4, 1.5 } ) ); + + // Non-fixed activation neurons: x4 (RELU), x5 (RELU), x9 (RELU). + compareNonfixedNeurons( nlr, + Set( { NLR::NeuronIndex( 2, 0 ), + NLR::NeuronIndex( 2, 1 ), + NLR::NeuronIndex( 4, 0 ) } ) ); + } + + void test_bbps_selection_relu_reindex() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluReindex( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + // Using branching point (x2, 0) for x4 (RELU). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 2, 0 ), + std::pair( { NLR::NeuronIndex( 1, 1 ), 0 } ) ); + + // Using branching point (x3, 0) for x5 (RELU). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 2, 1 ), + std::pair( { NLR::NeuronIndex( 1, 0 ), 0 } ) ); + + // Using branching point (x7, 0) for x9 (RELU). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 4, 0 ), + std::pair( { NLR::NeuronIndex( 3, 1 ), 0 } ) ); + + /* + Lower branch symbolic bounds: 0 <= x4 <= 0. + Upper branch symbolic bounds: x2 <= x4 <= x2. + */ + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 2, 0 ), + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + /* + Lower branch symbolic bounds: 0 <= x5 <= 0. + Upper branch symbolic bounds: x3 <= x5 <= x3. 
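+
+          (For an undecided ReLU, each branch is exact: the lower branch fixes the
+          output to 0 and the upper branch is the identity, which is reflected in the
+          { 0, 1 } coefficient vectors and { 0, 0 } bias vectors passed below.)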
+        */
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 2, 1 ),
+                                     Vector( { 0, 1 } ),
+                                     Vector( { 0, 1 } ),
+                                     Vector( { 0, 0 } ),
+                                     Vector( { 0, 0 } ) );
+
+        /*
+          Lower branch symbolic bounds: 0 <= x9 <= 0.
+          Upper branch symbolic bounds: x7 <= x9 <= x7.
+        */
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 4, 0 ),
+                                     Vector( { 0, 1 } ),
+                                     Vector( { 0, 1 } ),
+                                     Vector( { 0, 0 } ),
+                                     Vector( { 0, 0 } ) );
+
+        /* Calculating BBPS-based PMNR score of x4:
+           Symbolic bounds of output layer in terms of Layer 2:
+           x4 + x5 + 1 <= x10 <= 1.5x4 + 0.5x5 + 2, 0 <= x11 <= 0.5x4 - 0.5x5 + 1.
+           Concretizing x5: x4 + 1 <= x10 <= 1.5x4 + 3, 0 <= x11 <= 0.5x4 + 1.
+
+           Lower branch, using x2: [-2, 0], 0 <= x4 <= 0:
+           Output symbolic bounds: 1 <= x10 <= 3, 0 <= x11 <= 1.
+           Upper branch, using x2: [0, 2], x2 <= x4 <= x2:
+           Output symbolic bounds: x2 + 1 <= x10 <= 1.5x2 + 3, 0 <= x11 <= 0.5x2 + 1.
+
+           Summing over all branches and output neurons:
+           Lower symbolic expression: x2 + 2 >= 0.
+           Upper symbolic expression: 2x2 + 8 <= 12.
+
+           Final score = ( 12 - 0 ) / 2 = 6.
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 6 );
+
+        /* Calculating BBPS-based PMNR score of x5:
+           Symbolic bounds of output layer in terms of Layer 2:
+           x4 + x5 + 1 <= x10 <= 1.5x4 + 0.5x5 + 2, 0 <= x11 <= 0.5x4 - 0.5x5 + 1.
+           Concretizing x4: x5 + 1 <= x10 <= 0.5x5 + 5, 0 <= x11 <= -0.5x5 + 2.
+
+           Lower branch, using x3: [-2, 0], 0 <= x5 <= 0:
+           Output symbolic bounds: 1 <= x10 <= 5, 0 <= x11 <= 2.
+           Upper branch, using x3: [0, 2], x3 <= x5 <= x3:
+           Output symbolic bounds:
+           x3 + 1 <= x10 <= 0.5x3 + 5, 0 <= x11 <= -0.5x3 + 2.
+
+           Summing over all branches and output neurons:
+           Lower symbolic expression: x3 + 2 >= 0.
+           Upper symbolic expression: 14 <= 14.
+
+           Final score = ( 14 - 0 ) / 2 = 7.
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 7 );
+
+        /* Calculating BBPS-based PMNR score of x9:
+           Symbolic bounds of output layer in terms of Layer 4:
+           x8 + x9 + 1 <= x10 <= x8 + x9 + 1, x9 <= x11 <= x9.
+           Concretizing x8: x9 + 1 <= x10 <= x9 + 4, x9 <= x11 <= x9.
+
+           Lower branch, using x7: [-2, 0], 0 <= x9 <= 0:
+           Output symbolic bounds: 1 <= x10 <= 4, 0 <= x11 <= 0.
+           Upper branch, using x7: [0, 2], x7 <= x9 <= x7:
+           Output symbolic bounds: x7 + 1 <= x10 <= x7 + 4, x7 <= x11 <= x7.
+
+           Summing over all branches and output neurons:
+           Lower symbolic expression: 2x7 + 2 >= -2.
+           Upper symbolic expression: 2x7 + 8 <= 12.
+
+           Final score = ( 12 - (-2) ) / 2 = 7.
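+
+           A minimal Python double-check of the score above (a sketch; same convention:
+           half the gap between the summed upper bounds' maximum and the summed lower
+           bounds' minimum over x7 in [-2, 2]):
+           ---
+           lower = lambda x7: ( 1 + 0 ) + ( ( x7 + 1 ) + x7 )   # both branches, both outputs
+           upper = lambda x7: ( 4 + 0 ) + ( ( x7 + 4 ) + x7 )
+           print( ( max( upper( -2 ), upper( 2 ) ) - min( lower( -2 ), lower( 2 ) ) ) / 2 )  # 7.0
+           ---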
+ */ + comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 7 ); + } + + void test_symbolic_bound_maps_absolute_values_all_positive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTAbsoluteValue( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Both absolute values positive, bound survive through activations: + + x2 <= x4 <= x2 + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + x5 = x4 - x5 + => x2 - x3 <= x5 <= x2 - x3 + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ + + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (ABSOLUTE_VALUE): + x2 <= x4 <= x2 + x3 <= x5 <= x3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using x2 <= x4 <= x2, x3 <= x5 <= x3: + x2 - x3 <= x6 <= x2 - x3 + + Layer 0: + Using x2 = 2x0 + 3x1, x3 = x0 + x1: + x0 + 2x1 <= x6 <= x0 + 2x1 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 1, 1 } ), + Vector( { 1, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 1, 2 } ), + Vector( { 1, 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + + // Non-fixed activation neurons: None. 
+ compareNonfixedNeurons( nlr, Set( {} ) ); + } + + void test_symbolic_bound_maps_absolute_values_positive_and_negative() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTAbsoluteValue( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + x2 = 2x0 + 3x1 - 30 + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is negative, bounds get flipped + Second absolute value is positive, bounds surive the activation + + -x2 <= x4 <= -x2 + x4.lb = -2x0 -3x1 + 30 : [3, 19] + x4.ub = -2x0 -3x1 + 30 : [3, 19] + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + x5 = x4 - x5 + => -x2 - x3 <= x5 <= -x2 - x3 + x6.lb = - 3x0 - 4x1 + 30 : [-8, 14] + x6.ub = - 3x0 - 4x1 + 30 : [-8, 14] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 19, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 14, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (ABSOLUTE_VALUE): + -x2 <= x4 <= -x2 + x3 <= x5 <= x3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using -x2 <= x4 <= -x2, x3 <= x5 <= x3: + -x2 - x3 <= x6 <= -x2 - x3 + + Layer 0: + Using x2 = 2x0 + 3x1 - 30, x3 = x0 + x1: + -3x0 - 4x1 + 30 <= x6 <= -3x0 - 4x1 + 30 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { -1, 1 } ), + Vector( { -1, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { -1, -1 } ), + Vector( { -1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -3, -4 } ), + Vector( { -3, -4 } ), + Vector( { 30 } ), + Vector( { 30 } ) ); + + // Non-fixed activation neurons: None. 
+ compareNonfixedNeurons( nlr, Set( {} ) ); + } + + void test_symbolic_bound_maps_absolute_values_positive_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTAbsoluteValue( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + x2 = 2x0 + 3x1 - 15 + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is undecided, bounds are concretized. + Second absolute value is active, bounds surive the activation + + 0 <= x4 <= 12 + x4 range: [0, 12] + x4.lb = 0 + x4.ub = 12 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + => -x3 <= x6 <= -x3 + 12 + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 + 12 : [ 1, 7] + + x6 range: [-11, 7] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, 7, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (ABSOLUTE_VALUE): + 0 <= x4 <= 12 + x3 <= x5 <= x3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using 0 <= x4 <= 12, x3 <= x5 <= x3: + -x3 <= x6 <= -x3 + 12 + + Layer 0: + Using x3 = x0 + x1: + -x0 - x1 <= x6 <= -x0 - x1 + 12 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 12, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, -1 } ), + Vector( { 0, -1 } ), + Vector( { 0 } ), + Vector( { 12 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -1, -1 } ), + Vector( { -1, -1 } ), + Vector( { 0 } ), + Vector( { 12 } ) ); + + // Non-fixed activation neurons: x4 (ABSOLUTE_VALUE). 
+        compareNonfixedNeurons( nlr, Set( { NLR::NeuronIndex( 2, 0 ) } ) );
+    }
+
+    void test_bbps_selection_absolute_values_positive_and_not_fixed()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTAbsoluteValue( nlr, tableau );
+
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );
+
+        // Strong negative bias for x2, which is node (1,0)
+        nlr.setBias( 1, 0, -15 );
+
+        // Invoke Parameterised DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) );
+
+        // Using branching point (x2, 0) for x4 (ABSOLUTE_VALUE).
+        compareBBPSBranchingPoints(
+            nlr,
+            NLR::NeuronIndex( 2, 0 ),
+            std::pair( { NLR::NeuronIndex( 1, 0 ), 0 } ) );
+
+        /*
+          Lower branch symbolic bounds: -x2 <= x4 <= -x2.
+          Upper branch symbolic bounds: x2 <= x4 <= x2.
+        */
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 2, 0 ),
+                                     Vector( { -1, 1 } ),
+                                     Vector( { -1, 1 } ),
+                                     Vector( { 0, 0 } ),
+                                     Vector( { 0, 0 } ) );
+
+        /* Calculating BBPS-based PMNR score of x4:
+           Symbolic bounds of output layer in terms of Layer 2: x4 - x5 <= x6 <= x4 - x5.
+           Concretizing x5: x4 - 11 <= x6 <= x4 - 5.
+
+           Lower branch, using x2: [-4, 0], -x2 <= x4 <= -x2:
+           Output symbolic bounds -x2 - 11 <= x6 <= -x2 - 5.
+           Upper branch, using x2: [0, 12], x2 <= x4 <= x2:
+           Output symbolic bounds x2 - 11 <= x6 <= x2 - 5.
+
+           Summing over all branches:
+           Lower symbolic expression: -22 >= -22.
+           Upper symbolic expression: -10 <= -10.
+
+           Final score = ( -10 - (-22) ) / 2 = 6.
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 6 );
+    }
+
+    void test_symbolic_bound_maps_absolute_values_active_and_externally_fixed()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTAbsoluteValue( nlr, tableau );
+
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );
+
+        // Strong negative bias for x2, which is node (1,0). Should make the node unfixed.
+ nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = -3 + x2 is eliminated, everything set to -3 + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is negative, bounds get flipped + Second absolute value is positive, bounds surive the activation + + -x2 <= x4 <= -x2 + x4: all set to 3 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + => -x2 - x3 <= x6 <= -x2 - x3 + => -x3 + 3 <= x6 <= -x3 + 3 + x6.lb = - x0 - x1 + 3 : [-8, -2] + x6.ub = - x0 - x1 + 3 : [-8, -2] + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (ABSOLUTE_VALUE): + -x2 <= x4 <= -x2 + x3 <= x5 <= x3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using -x2 <= x4 <= -x2, x3 <= x5 <= x3: + -x2 - x3 <= x6 <= -x2 - x3 + x2 = -3 is eliminated. + -x3 + 3 <= x6 <= -x3 + 3 + + Layer 0: + Using x3 = x0 + x1: + - x0 - x1 + 3 <= x6 <= - x0 - x1 + 3 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { -1, 1 } ), + Vector( { -1, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, -1 } ), + Vector( { 0, -1 } ), + Vector( { 3 } ), + Vector( { 3 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -1, -1 } ), + Vector( { -1, -1 } ), + Vector( { 3 } ), + Vector( { 3 } ) ); + + // Non-fixed activation neurons: None. 
+ compareNonfixedNeurons( nlr, Set( {} ) ); + } + + void test_symbolic_bound_maps_signs_positive_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSign( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 - 15 + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First sign is undecided, bounds are concretized. + Second sign is active, bounds become constant 1 + Coefficient (first Sign, lower): 2/12 = 1/6. + Coefficient (first Sign, upper): -2/-4 = 1/2. + + 1/6 x2 - 1 <= x4 <= 1/2 x2 + 1 + x4.lb = 1/6 ( 2x0 + 3x1 - 15 ) - 1 = 2/6 x0 + 3/6 x1 - 21/6 + x4.ub = 1/2 ( 2x0 + 3x1 - 15 ) + 1 = x0 + 1.5x1 - 6.5 + x4 range: [-1, 1] + + 1 <= x5 <= 1 + x5.lb = 1 + x5.ub = 1 + x5 range: [1, 1] + + Layer 3: + + x6 = x4 - x5 : [-2, 0] + => 1/6 x2 - 2 <= x6 <= 1/2 x2 : [-8/3, 6] + x6.lb = 1 ( 2/6 x0 + 3/6 x1 - 21/6 ) - 1 ( 1 ) = 1/3 x0 + 1/2 x1 - 4.5 : [-16/6, 0] + x6.ub = 1 ( x0 + 1.5x1 - 6.5 ) - 1 ( 1 ) = x0 + 1.5x1 - 7.5 : [-2, 6] + + x6 range: [-2, 0] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), + Tightening( 6, 0, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (SIGN): + 1/6 x2 - 1 <= x4 <= 1/2 x2 + 1 + 1 <= x5 <= 1 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using 1/6 x2 - 1 <= x4 <= 1/2 x2 + 1, 1 <= x5 <= 1: + 1/6 x2 - 2 <= x6 <= 1/2 x2 + + Layer 0: + Using x2 = 2x0 + 3x1 - 15: + 1/3 x0 + 1/2 x1 - 4.5 <= x6 <= x0 + 1.5x1 - 7.5 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0.1667, 0 } ), + Vector( { 0.5, 0 } ), + Vector( { -1, 1 } ), + Vector( { 1, 1 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0.1667, 0 } ), + Vector( { 0.5, 0 } ), + Vector( { -2 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0.3333, 0.5 } ), + Vector( { 1, 1.5 } ), + Vector( { -4.5 } ), + Vector( { -7.5 } ) ); + + // Non-fixed activation neurons: x4 (SIGN). 
+        compareNonfixedNeurons( nlr, Set( { NLR::NeuronIndex( 2, 0 ) } ) );
+    }
+
+    void test_bbps_selection_signs_positive_and_not_fixed()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTSign( nlr, tableau );
+
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );
+
+        // Strong negative bias for x2, which is node (1,0)
+        nlr.setBias( 1, 0, -15 );
+
+        // Invoke Parameterised DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) );
+
+        // Using branching point (x2, 0) for x4 (SIGN).
+        compareBBPSBranchingPoints(
+            nlr,
+            NLR::NeuronIndex( 2, 0 ),
+            std::pair( { NLR::NeuronIndex( 1, 0 ), 0 } ) );
+
+        /*
+          Lower branch symbolic bounds: -1 <= x4 <= -1.
+          Upper branch symbolic bounds: 1 <= x4 <= 1.
+        */
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 2, 0 ),
+                                     Vector( { 0, 0 } ),
+                                     Vector( { 0, 0 } ),
+                                     Vector( { -1, 1 } ),
+                                     Vector( { -1, 1 } ) );
+
+        /* Calculating BBPS-based PMNR score of x4:
+           Symbolic bounds of output layer in terms of Layer 2: x4 - x5 <= x6 <= x4 - x5.
+           Concretizing x5 ( x5 = 1 ): x4 - 1 <= x6 <= x4 - 1.
+
+           Lower branch, using x2: [-4, 0], -1 <= x4 <= -1:
+           Output symbolic bounds -2 <= x6 <= -2.
+           Upper branch, using x2: [0, 12], 1 <= x4 <= 1:
+           Output symbolic bounds 0 <= x6 <= 0.
+
+           Summing over all branches:
+           Lower symbolic expression: -2 >= -2.
+           Upper symbolic expression: -2 <= -2.
+
+           Final score = ( (-2) - (-2) ) / 2 = 0.
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 0 );
+    }
+
+    void test_symbolic_bound_maps_signs_active_and_externally_fixed()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTSign( nlr, tableau );
+
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );
+
+        // Strong negative bias for x2, which is node (1,0). Should make the node unfixed.
+ nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = -3 + x2 is eliminated, everything set to -3 + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First sign is negative, bounds become constant -1 + Second sign is positive, bounds become constant 1 + + -1 <= x4 <= 1 + x4: all set to -1 + + 1 <= x5 <= 1 + x5: all set to 1 + + Layer 3: + + x6 = x5 - x4 + x6.lb = 1 ( -1 ) - 1 ( 1 ) = -2 + x6.ub = 1 ( -1 ) - 1 ( 1 ) = -2 + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, -1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (SIGN): + -1 <= x4 <= -1 + 1 <= x5 <= 1 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using -1 <= x4 <= -1, 1 <= x5 <= 1: + -2 <= x6 <= -2 + + Layer 0: + -2 <= x6 <= -2 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0, 0 } ), + Vector( { 0, 0 } ), + Vector( { -1, 1 } ), + Vector( { -1, 1 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, 0 } ), + Vector( { 0, 0 } ), + Vector( { -2 } ), + Vector( { -2 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0, 0 } ), + Vector( { 0, 0 } ), + Vector( { -2 } ), + Vector( { -2 } ) ); + + // Non-fixed activation neurons: None. + compareNonfixedNeurons( nlr, Set( {} ) ); + } + + void test_symbolic_bound_maps_leaky_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTLeakyReLU( nlr, tableau ); // alpha = 0.2 + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 1] + + Layers 1, 2: + + x2 = x0 + x1 + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3 = x0 - x1 + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] + + Both LeakyReLUs are undecided, bounds are concretized. 
+ Coefficient: ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6 + Bias: ( 0.2 - 1 ) * 2 * -2 / ( 2--2 ) = 0.8 + + x2 <= x4 <= 0.6 x2 + 0.8 + x4.lb = x0 + x1 + x4.ub = 0.6 ( x0 + x1 ) + 0.8 = 0.6x0 + 0.6x1 + 0.8 + x4 range: [-2, 2] + + x3 <= x5 <= 0.6 x3 + 0.8 + x5.lb = x0 - x1 + x5.ub = 0.6 ( x0 - x1 ) + 0.8 = 0.6x0 - 0.6x1 + 0.8 + x5 range: [-2, 2] + + Layers 3, 4: + + x6 = x4 + x5 + => x2 + x3 <= x6 <= 0.6 x2 + 0.6 x3 + 1.6 + x6.lb = 1 ( x0 + x1 ) + 1 ( x0 - x1 ) = 2x0 : [-2, 2] + x6.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) + 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2x0 + 1.6 : [0.4, 2.8] + x6 range: [-2, 2.8] + + x7 = x4 - x5 + => x2 - 0.6x3 - 0.8 <= x6 <= 0.6 x2 - x3 + 0.8 + x7.lb = 1 ( x0 + x1 ) - 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] + x7.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) - 1 ( x0 - x1 ) = -0.4x0 + 1.6x1 + 0.8 : [-1.2, 2.8] + x7 range: [-2.8, 2.8] + + Both LeakyReLUs are undecided, bounds are concretized. + Coefficient (first LeakyReLU): ( 2.8 - 0.2*-2 )/( 2.8--2 ) = 3.2/4.8 = 10/15 = 2/3 + Bias (first LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2 / ( 2.8--2 ) = 14/15 + + Coefficient (second LeakyReLU): ( 2.8 - 0.2*-2.8 )/( 2.8--2.8 ) = 3.36/5.6 = 0.6 + Bias (second LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2.8 / ( 2.8--2.8 ) = 1.12 + + x6 <= x8 <= 10/15 x6 + 14/15 + x8.lb = 2x0 + x8.ub = 10/15 ( 1.2x0 + 1.6 ) + 14/15 = 0.8x0 + 2 + x8 range: [-2, 2.8] + + x7 <= x9 <= 0.6x7 + 1.12 + x9.lb = 0.4x0 + 1.6x1 - 0.8 + x9.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 + x9 range: [-0.56, 2.8] + + Layer 5: + + x10 = x8 + x9 + 1 + => x6 + x7 + 1 <= x10 <= 2/3 x6 + 0.6 x7 + 229/75 + => 2x4 + 1 <= x10 <= 19/15 x4 + 1/15 x5 + 229/75 + => 2x2 + 1 <= x10 <= 0.76 x2 + 0.04 x3 + 4.12 + x10.lb = 2x0 + 2x1 + 1 : [-3, 5] + x10.ub = 0.8 x0 + 0.72 x1 + 4.12 : [2.6, 5.64] + x10 range: [-3, 5.64] + + x11 = x9 + => x7 <= x11 <= 0.6x7 + 1.12 + => x4 - x5 <= x11 <= 0.6x4 - 0.6x5 + 1.12 + => x2 - 0.6x3 - 0.8 <= x11 <= 0.36 x2 - 0.6 x3 + 1.6 + x11.lb = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] + x11.ub = -0.24 x0 + 0.96 x1 + 1.6 : [0.4, 2.8] + x11 range: [-2.8, 2.8] + */ + + List expectedBounds( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -2, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -2, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2.8, Tightening::UB ), + Tightening( 7, -2.8, Tightening::LB ), Tightening( 7, 2.8, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 2.8, Tightening::UB ), + Tightening( 9, -2.8, Tightening::LB ), Tightening( 9, 2.8, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 5.64, Tightening::UB ), + Tightening( 11, -2.8, Tightening::LB ), Tightening( 11, 2.8, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (LEAKY_RELU): + x2 <= x4 <= 0.6 x2 + 0.8 + x3 <= x5 <= 0.6 x3 + 0.8 + + Layer 4 (LEAKY_RELU): + x6 <= x8 <= 2/3 x6 + 14/15 + x7 <= x9 <= 0.6x7 + 1.12 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 5: + x10 <= x10 <= x10 + x11 <= x11 <= x11 + + Layer 4: + Using x10 = x8 + x9 + 1, x11 = x9: + x8 + x9 + 1 <= x10 <= x8 + x9 + 1 + x9 <= x11 <= x9 + + Layer 3: + Using x6 <= x8 <= 2/3 x6 + 14/15, x7 <= 
x9 <= 0.6x7 + 1.12: + x6 + x7 + 1 <= x10 <= 2/3 x6 + 0.6 x7 + 229/75 + x7 <= x11 <= 0.6x7 + 1.12 + + Layer 2: + Using x6 = x4 + x5, x7 = x4 - x5: + 2x4 + 1 <= x10 <= 19/15 x4 + 1/15 x5 + 229/75 + x4 - x5 <= x11 <= 0.6x4 - 0.6x5 + 1.12 + + Layer 1: + Using x2 <= x4 <= 0.6 x2 + 0.8, x3 <= x5 <= 0.6 x3 + 0.8: + 2x2 + 1 <= x10 <= 0.76 x2 + 0.04 x3 + 4.12 + x2 - 0.6x3 - 0.8 <= x11 <= 0.36 x2 - 0.6 x3 + 1.6 + + Layer 0: + Using x2 = x0 + x1, x3 = x0 - x1: + 2x0 + 2x1 + 1 <= x10 <= 0.8 x0 + 0.72 x1 + 4.12 + 0.4x0 + 1.6x1 - 0.8 <= x11 <= -0.24 x0 + 0.96 x1 + 1.6 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 1, 1 } ), + Vector( { 0.6, 0.6 } ), + Vector( { 0, 0 } ), + Vector( { 0.8, 0.8 } ) ); + + comparePredecessorSymbolicBounds( nlr, + 4, + Vector( { 1, 1 } ), + Vector( { 0.6667, 0.6 } ), + Vector( { 0, 0 } ), + Vector( { 0.9333, 1.12 } ) ); + + compareOutputSymbolicBounds( nlr, + 5, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 1, 0, 1, 1 } ), + Vector( { 1, 0, 1, 1 } ), + Vector( { 1, 0 } ), + Vector( { 1, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1, 0, 1, 1 } ), + Vector( { 0.6667, 0, 0.6, 0.6 } ), + Vector( { 1, 0 } ), + Vector( { 3.0533, 1.12 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 2, 1, 0, -1 } ), + Vector( { 1.2667, 0.6, 0.0667, -0.6 } ), + Vector( { 1, 0 } ), + Vector( { 3.0533, 1.12 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 2, 1, 0, -0.6 } ), + Vector( { 0.76, 0.36, 0.04, -0.6 } ), + Vector( { 1, -0.8 } ), + Vector( { 4.12, 1.6 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 2, 0.4, 2, 1.6 } ), + Vector( { 0.8, -0.24, 0.72, 0.96 } ), + Vector( { 1, -0.8 } ), + Vector( { 4.12, 1.6 } ) ); + + // Non-fixed activation neurons: x4 (LEAKY_RELU), x5 (LEAKY_RELU), x8 (LEAKY_RELU), x9 + // (LEAKY_RELU). + compareNonfixedNeurons( nlr, + Set( { NLR::NeuronIndex( 2, 0 ), + NLR::NeuronIndex( 2, 1 ), + NLR::NeuronIndex( 4, 0 ), + NLR::NeuronIndex( 4, 1 ) } ) ); + } + + void test_bbps_selection_leaky_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTLeakyReLU( nlr, tableau ); // alpha = 0.2 + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + // Using branching point (x2, 0) for x4 (LEAKY_RELU). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 2, 0 ), + std::pair( { NLR::NeuronIndex( 1, 0 ), 0 } ) ); + + // Using branching point (x3, 0) for x5 (LEAKY_RELU). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 2, 1 ), + std::pair( { NLR::NeuronIndex( 1, 1 ), 0 } ) ); + + // Using branching point (x6, 0) for x8 (LEAKY_RELU). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 4, 0 ), + std::pair( { NLR::NeuronIndex( 3, 0 ), 0 } ) ); + + // Using branching point (x7, 0) for x9 (LEAKY_RELU). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 4, 1 ), + std::pair( { NLR::NeuronIndex( 3, 1 ), 0 } ) ); + + /* + Lower branch symbolic bounds: 0.2 x2 <= x4 <= 0.2 x2. + Upper branch symbolic bounds: x2 <= x4 <= x2. 
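+
+          (For an undecided LeakyReLU with alpha = 0.2, each branch is exact: the lower
+          branch is the leak 0.2 x2 and the upper branch is the identity, which is
+          reflected in the { 0.2, 1 } coefficient vectors used in the calls below.)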
+        */
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 2, 0 ),
+                                     Vector( { 0.2, 1 } ),
+                                     Vector( { 0.2, 1 } ),
+                                     Vector( { 0, 0 } ),
+                                     Vector( { 0, 0 } ) );
+
+        /*
+          Lower branch symbolic bounds: 0.2 x3 <= x5 <= 0.2 x3.
+          Upper branch symbolic bounds: x3 <= x5 <= x3.
+        */
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 2, 1 ),
+                                     Vector( { 0.2, 1 } ),
+                                     Vector( { 0.2, 1 } ),
+                                     Vector( { 0, 0 } ),
+                                     Vector( { 0, 0 } ) );
+
+        /*
+          Lower branch symbolic bounds: 0.2 x6 <= x8 <= 0.2 x6.
+          Upper branch symbolic bounds: x6 <= x8 <= x6.
+        */
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 4, 0 ),
+                                     Vector( { 0.2, 1 } ),
+                                     Vector( { 0.2, 1 } ),
+                                     Vector( { 0, 0 } ),
+                                     Vector( { 0, 0 } ) );
+
+        /*
+          Lower branch symbolic bounds: 0.2 x7 <= x9 <= 0.2 x7.
+          Upper branch symbolic bounds: x7 <= x9 <= x7.
+        */
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 4, 1 ),
+                                     Vector( { 0.2, 1 } ),
+                                     Vector( { 0.2, 1 } ),
+                                     Vector( { 0, 0 } ),
+                                     Vector( { 0, 0 } ) );
+
+        /* Calculating BBPS-based PMNR score of x4:
+           Symbolic bounds of output layer in terms of Layer 2:
+           2x4 + 1 <= x10 <= 19/15 x4 + 1/15 x5 + 229/75, x4 - x5 <= x11 <= 0.6x4 - 0.6x5 + 1.12.
+           Concretizing x5: 2x4 + 1 <= x10 <= 19/15 x4 + 239/75, x4 - 2 <= x11 <= 0.6x4 + 2.32.
+
+           Lower branch, using x2: [-2, 0], 0.2 x2 <= x4 <= 0.2 x2: Output symbolic bounds:
+           0.4 x2 + 1 <= x10 <= 19/75 x2 + 239/75, 0.2 x2 - 2 <= x11 <= 0.12 x2 + 2.32.
+           Upper branch, using x2: [0, 2], x2 <= x4 <= x2: Output symbolic bounds:
+           2x2 + 1 <= x10 <= 19/15 x2 + 239/75, x2 - 2 <= x11 <= 0.6x2 + 2.32.
+
+           Summing over all branches and output neurons:
+           Lower symbolic expression: 3.6 x2 - 2 >= -9.2.
+           Upper symbolic expression: 2.24 x2 + 826/75 <= 1162/75.
+
+           Final score = ( 1162/75 - (-9.2) ) / 2 = 926/75 = 12.3467.
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 12.3467 );
+
+        /* Calculating BBPS-based PMNR score of x5:
+           Symbolic bounds of output layer in terms of Layer 2:
+           2x4 + 1 <= x10 <= 19/15 x4 + 1/15 x5 + 229/75, x4 - x5 <= x11 <= 0.6x4 - 0.6x5 + 1.12.
+           Concretizing x4: -3 <= x10 <= 1/15 x5 + 419/75, -x5 - 2 <= x11 <= -0.6x5 + 2.32.
+
+           Lower branch, using x3: [-2, 0], 0.2 x3 <= x5 <= 0.2 x3: Output symbolic bounds:
+           -3 <= x10 <= 1/75 x3 + 419/75, -0.2 x3 - 2 <= x11 <= -0.12 x3 + 2.32.
+           Upper branch, using x3: [0, 2], x3 <= x5 <= x3: Output symbolic bounds:
+           -3 <= x10 <= 1/15 x3 + 419/75, -x3 - 2 <= x11 <= -0.6x3 + 2.32.
+
+           Summing over all branches and output neurons:
+           Lower symbolic expression: -1.2 x3 - 10 >= -12.4.
+           Upper symbolic expression: -0.64 x3 + 1186/75 <= 1282/75.
+
+           Final score = ( 1282/75 - (-12.4) ) / 2 = 1106/75 = 14.7467.
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 14.7467 );
+
+        /* Calculating BBPS-based PMNR score of x8:
+           Symbolic bounds of output layer in terms of Layer 4:
+           x8 + x9 + 1 <= x10 <= x8 + x9 + 1, x9 <= x11 <= x9.
+           Concretizing x9: x8 - 1.8 <= x10 <= x8 + 3.8, -2.8 <= x11 <= 2.8.
+
+           Lower branch, using x6: [-2, 0], 0.2 x6 <= x8 <= 0.2 x6: Output symbolic bounds:
+           0.2 x6 - 1.8 <= x10 <= 0.2 x6 + 3.8, -2.8 <= x11 <= 2.8.
+           Upper branch, using x6: [0, 2.8], x6 <= x8 <= x6: Output symbolic bounds:
+           x6 - 1.8 <= x10 <= x6 + 3.8, -2.8 <= x11 <= 2.8.
+
+           Summing over all branches and output neurons:
+           Lower symbolic expression: 1.2 x6 - 9.2 >= -11.6.
+           Upper symbolic expression: 1.2 x6 + 13.2 <= 16.56.
+
+           Final score = ( 16.56 - (-11.6) ) / 2 = 14.08.
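+
+           A minimal Python double-check of the score above (a sketch; same convention,
+           over x6 in [-2, 2.8]):
+           ---
+           lower = lambda x6: ( 0.2 * x6 - 1.8 ) + ( -2.8 ) + ( x6 - 1.8 ) + ( -2.8 )
+           upper = lambda x6: ( 0.2 * x6 + 3.8 ) + 2.8 + ( x6 + 3.8 ) + 2.8
+           print( ( max( upper( -2 ), upper( 2.8 ) ) - min( lower( -2 ), lower( 2.8 ) ) ) / 2 )  # ~14.08
+           ---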
+ */ + comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 14.08 ); + + /* Calculating BBPS-based PMNR score of x9: + Symbolic bounds of output layer in terms of Layer 4: + x8 + x9 + 1 <= x10 <= x8 + x9 + 1, x9 <= x11 <= x9. + Concretizing x8: x9 - 1 <= x10 <= x9 + 3.8, x9 <= x11 <= x9. + + Lower branch, using x7: [-2.8, 0], 0.2 x7 <= x9 <= 0.2 x7: Output symbolic bounds: + 0.2 x7 - 1 <= x10 <= 0.2 x7 + 3.8, 0.2 x7 <= x11 <= 0.2 x7. + Upper branch, using x7: [0, 2.8], x7 <= x9 <= x7: Output symbolic bounds: + x7 - 1 <= x10 <= x7 + 3.8, x7 <= x11 <= x7. + + Summing over all branches and output neurons: + Lower symbolic expression: 2.4 x7 - 2 >= -8.72. + Upper symbolic expression: 2.4 x7 + 7.6 <= 14.32. + + Final score = ( 14.32 - (-8.72) ) / 2 = 11.52. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 4, 1 ), 11.52 ); + } + + void test_symbolic_bound_maps_sigmoids_and_round() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSigmoidsAndRound( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + // Layer 1 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 0 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 0 ), 2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 1 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 1 ), 2, 0.00001 ) ); + + // Layer 2 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 0 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 0 ), 0.8807, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 1 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 1 ), 0.8807, 0.0001 ) ); + + // Layer 3 + /* + Double-check with Python + --- + from math import exp as e + def g(x): + return 1 / (1 + e(-x)) + + def g_prime(x): + return g(x) * (1 - g(x)) + + def lam(l, u): + return (g(u) - g(l)) / (u - l) + + def lam_prime(l, u): + return min(g_prime(l), g_prime(u)) + + l3 = l4 = -2 + u3 = u4 = 2 + l5 = l6 = g(-2) + u5 = u6 = g(2) + lambda7 = lam(l3, u3) + lambda7_prime = lam_prime(l3, u3) + lambda8 = lam(l4, u4) + lambda8_prime = lam_prime(l4, u4) + x7_l = lambda7_prime * (-2) + g(-2) + g(-2) - lambda7_prime * (-2 + -2) + x7_u = lambda7_prime * (2) + g(2) + g(2) -lambda7_prime * (2 + 2) + x8_l = lambda8_prime * (-2) + g(-2) - g(2) - lambda8_prime * (-2 - 2) + x8_u = lambda8_prime * (2) + g(2) - g(-2) -lambda8_prime * (2 - -2) + print(x7_l) + print(x7_u) + print(x8_l) + print(x8_u) + + ''' + Sigmoid linear relaxation ( Layer 2 ): + x4 >= lambda7_prime * x2 + ( g(l3) - lambda7_prime * l3 ) + x4 <= lambda7_prime * x2 + ( g(u3) - lambda7_prime * u3 ) + x5 >= lambda8_prime * x3 + ( g(l4) - lambda8_prime * l4 ) + x5 <= lambda8_prime * x3 + ( g(u4) - lambda8_prime * u4 ) + ''' + print('------------------') + print(lambda7_prime) + print(lambda8_prime) + print(g(l3) - lambda7_prime * l3) + print(g(u3) - lambda7_prime * u3) + print(g(l4) - lambda8_prime * l4) + print(g(u4) - lambda8_prime * u4) + + --- + [output]: +
0.4483930148512481 + 1.5516069851487517 + -0.5516069851487517 + 0.5516069851487517 + ------------------ + 0.1049935854035065 + 0.1049935854035065 + 0.3291900928291306 + 0.6708099071708693 + 0.3291900928291306 + 0.6708099071708693 + */ + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 0 ), 0.4483, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 0 ), 1.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 1 ), -0.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 1 ), 0.5516, 0.0001 ) ); + + // Layer 4 + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 0 ), 0 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 0 ), 2 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 1 ), -1 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 1 ), 1 ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (SIGMOID): + 0.1050 x2 + 0.3292 <= x4 <= 0.1050 x2 + 0.6708 + 0.1050 x3 + 0.3292 <= x5 <= 0.1050 x3 + 0.6708 + + Layer 4 (ROUND): + x6 - 0.5 <= x8 <= x6 + 0.5 + x7 - 0.5 <= x9 <= x7 + 0.5 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 4: + x8 <= x8 <= x8 + x9 <= x9 <= x9 + + Layer 3: + Using x6 - 0.5 <= x8 <= x6 + 0.5, x7 - 0.5 <= x9 <= x7 + 0.5: + x6 - 0.5 <= x8 <= x6 + 0.5 + x7 - 0.5 <= x9 <= x7 + 0.5 + + Layer 2: + Using x6 = x4 + x5, x7 = x4 - x5: + x4 + x5 - 0.5 <= x8 <= x4 + x5 + 0.5 + x4 - x5 - 0.5 <= x9 <= x4 - x5 + 0.5 + + Layer 1: + Using + 0.1050 x2 + 0.3292 <= x4 <= 0.1050 x2 + 0.6708, + 0.1050 x3 + 0.3292 <= x5 <= 0.1050 x3 + 0.6708: + 0.1050 x2 + 0.1050 x3 + 0.1584 <= x8 <= 0.1050 x2 + 0.1050 x3 + 1.8416 + 0.1050 x2 - 0.1050 x3 - 0.8416 <= x9 <= 0.1050 x2 - 0.1050 x3 + 0.8516 + + Layer 0: + Using x2 = x0 + x1, x3 = x0 - x1: + 0.2100 x0 + 0.1584 <= x8 <= 0.2100 x0 + 1.8416 + 0.2100 x1 - 0.8416 <= x9 <= 0.2100 x1 + 0.8516 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0.1050, 0.1050 } ), + Vector( { 0.1050, 0.1050 } ), + Vector( { 0.3292, 0.3292 } ), + Vector( { 0.6708, 0.6708 } ) ); + comparePredecessorSymbolicBounds( nlr, + 4, + Vector( { 1, 1 } ), + Vector( { 1, 1 } ), + Vector( { -0.5, -0.5 } ), + Vector( { 0.5, 0.5 } ) ); + + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { -0.5, -0.5 } ), + Vector( { 0.5, 0.5 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, 1, 1, -1 } ), + Vector( { 1, 1, 1, -1 } ), + Vector( { -0.5, -0.5 } ), + Vector( { 0.5, 0.5 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0.1050, 0.1050, 0.1050, -0.1050 } ), + Vector( { 0.1050, 0.1050, 0.1050, -0.1050 } ), + Vector( { 0.1584, -0.8416 } ), + Vector( { 1.8416, 0.8416 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0.2100, 0, 0, 0.2100 } ), + Vector( { 0.2100, 0, 0, 0.2100 } ), + Vector( { 0.1584, -0.8416 } ), + Vector( { 1.8416, 0.8416 } ) ); + + // Non-fixed activation neurons: x4 (SIGMOID), x5 (SIGMOID), x8 (ROUND), x9 (ROUND). 
+ compareNonfixedNeurons( nlr, + Set( { NLR::NeuronIndex( 2, 0 ), + NLR::NeuronIndex( 2, 1 ), + NLR::NeuronIndex( 4, 0 ), + NLR::NeuronIndex( 4, 1 ) } ) ); + } + + void test_bbps_selection_sigmoids_and_round() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSigmoidsAndRound( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + // Using branching point (x2, -2/101) for x4 (SIGMOID). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 2, 0 ), + std::pair( { NLR::NeuronIndex( 1, 0 ), -0.0198 } ) ); + + // Using branching point (x3, -2/101) for x5 (SIGMOID). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 2, 1 ), + std::pair( { NLR::NeuronIndex( 1, 1 ), -0.0198 } ) ); + + // Using branching point (x6, 0.5) for x8 (ROUND). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 4, 0 ), + std::pair( { NLR::NeuronIndex( 3, 0 ), 0.5 } ) ); + + // Using branching point (x7, -0.5) for x9 (ROUND). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 4, 1 ), + std::pair( { NLR::NeuronIndex( 3, 1 ), -0.5 } ) ); + + /* + Double-check with Python + --- + from math import exp as e + def g(x): + return 1 / (1 + e(-x)) + + def g_prime(x): + return g(x) * (1 - g(x)) + + def lam(l, u): + return (g(u) - g(l)) / (u - l) + + def lam_prime(l, u): + return min(g_prime(l), g_prime(u)) + + l3 = l4 = -2 + u3 = u4 = -2/101 + l5 = l6 = g(-2) + u5 = u6 = g(-2/101) + lambda7 = lam(l3, u3) + lambda7_prime = lam_prime(l3, u3) + lambda8 = lam(l4, u4) + lambda8_prime = lam_prime(l4, u4) + + ''' + Layer 2 Sigmoid linear relaxation, lower branches x2: [-2, 2/101], x3: [-2, 2/101]: + x4 >= lambda7_prime * x2 + ( g(l3) - lambda7_prime * l3 ) + x4 <= lambda7 * x2 + ( g(u3) - lambda7 * u3 ) + x5 >= lambda8_prime * x3 + ( g(l4) - lambda8_prime * l4 ) + x5 <= lambda8 * x3 + ( g(u4) - lambda8 * u4 ) + ''' + print('------------------') + print(lambda7_prime) + print(lambda7) + print(lambda8_prime) + print(lambda8) + print(g(l3) - lambda7_prime * l3) + print(g(u3) - lambda7 * u3) + print(g(l4) - lambda8_prime * l4) + print(g(u4) - lambda8 * u4) + + l3 = l4 = -2/101 + u3 = u4 = 2 + l5 = l6 = g(-2/101) + u5 = u6 = g(2) + lambda7 = lam(l3, u3) + lambda7_prime = lam_prime(l3, u3) + lambda8 = lam(l4, u4) + lambda8_prime = lam_prime(l4, u4) + + ''' + Layer 2 Sigmoid linear relaxation, upper branches x2: [-2/101, 2], x3: [-2/101, 2]: + x4 >= lambda7_prime * x2 + ( g(l3) - lambda7_prime * l3 ) + x4 <= lambda7_prime * x2 + ( g(u3) - lambda7_prime * u3 ) + x5 >= lambda8_prime * x3 + ( g(l4) - lambda8_prime * l4 ) + x5 <= lambda8_prime * x3 + ( g(u4) - lambda8_prime * u4 ) + ''' + print('------------------') + print(lambda7_prime) + print(lambda8_prime) + print(g(l3) - lambda7_prime * l3) + print(g(u3) - lambda7_prime * u3) + print(g(l4) - lambda8_prime * l4) + print(g(u4) - lambda8_prime * u4) + + --- + [output]: + ------------------ + 0.1049935854035065 + 0.18980260606696492 + 0.1049935854035065 + 0.18980260606696492 + 0.3291900928291306 + 0.4988081341560474 + 0.3291900928291306 + 0.4988081341560474 + ------------------ + 
0.10499358540350662 + 0.10499358540350662 + 0.49712874760825615 + 0.6708099071708691 + 0.49712874760825615 + 0.6708099071708691 + + Lower branch symbolic bounds: 0.1050 x2 + 0.3292 <= x4 <= 0.1898 x2 + 0.4988. + Upper branch symbolic bounds: 0.1050 x2 + 0.4971 <= x4 <= 0.1050 x2 + 0.6708. + + Lower branch symbolic bounds: 0.1050 x3 + 0.3292 <= x5 <= 0.1845 x3 + 0.4988. + Upper branch symbolic bounds: 0.1050 x3 + 0.4971 <= x5 <= 0.1050 x3 + 0.6708. + */ + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 2, 0 ), + Vector( { 0.1050, 0.1050 } ), + Vector( { 0.1898, 0.1050 } ), + Vector( { 0.3292, 0.4971 } ), + Vector( { 0.4988, 0.6708 } ) ); + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 2, 1 ), + Vector( { 0.1050, 0.1050 } ), + Vector( { 0.1898, 0.1050 } ), + Vector( { 0.3292, 0.4971 } ), + Vector( { 0.4988, 0.6708 } ) ); + + /* + Lower branch symbolic bounds: 0 <= x8 <= 0. + Upper branch symbolic bounds: x6 - 0.5 <= x8 <= x6 + 0.5. + */ + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 4, 0 ), + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, -0.5 } ), + Vector( { 0, 0.5 } ) ); + + /* + Lower branch symbolic bounds: -1 <= x9 <= -1. + Upper branch symbolic bounds: x7 - 0.5 <= x9 <= x7 + 0.5. + */ + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 4, 1 ), + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { -1, -0.5 } ), + Vector( { -1, 0.5 } ) ); + + /* Calculating BBPS-based PMNR score of x4: + Symbolic bounds of output layer in terms of Layer 2: + x4 + x5 - 0.5 <= x8 <= x4 + x5 + 0.5, x4 - x5 - 0.5 <= x9 <= x4 - x5 + 0.5. + Concretizing x5: x4 - 0.3808 <= x8 <= x4 + 1.3808, x4 - 1.3808 <= x9 <= x4 + 0.3808. + + Lower branch, using x2: [-2, -2/11], 0.1050 x2 + 0.3292 <= x4 <= 0.1845 x2 + 0.4882: + Output symbolic bounds: + 0.1050 x2 - 0.0516 <= x8 <= 0.1898 x2 + 1.8796, + 0.1050 x2 - 1.0516 <= x9 <= 0.1898 x2 + 0.8796. + Upper branch, using x2: [-2/11, 2], 0.1050 x2 + 0.4737 <= x4 <= 0.1050 x2 + 0.6708: + Output symbolic bounds: + 0.1050 x2 + 0.1163 <= x8 <= 0.1050 x2 + 2.0516, + 0.1050 x2 - 0.8837 <= x9 <= 0.1050 x2 + 1.0516. + + Summing over all branches and output neurons: + Lower symbolic expression: 0.4200 x2 - 1.8705 >= -2.7105. + Upper symbolic expression: 0.5896 x2 + 5.8624 <= 7.0416. + + Final score = ( 7.0416 - (-2.7105) ) / 2 = 4.8761. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 4.8761 ); + + /* Calculating BBPS-based PMNR score of x5: + Symbolic bounds of output layer in terms of Layer 2: + x4 + x5 - 0.5 <= x8 <= x4 + x5 + 0.5, x4 - x5 - 0.5 <= x9 <= x4 - x5 + 0.5. + Concretizing x4: x5 - 0.3808 <= x8 <= x5 + 1.3808, -x5 - 0.3808 <= x9 <= -x5 + 1.3808. + + // 0.1050 x2 + 0.3292 <= x4 <= 0.1898 x2 + 0.4988 + // 0.1050 x2 + 0.4971 <= x4 <= 0.1050 x2 + 0.6708 + Lower branch, using x2: [-2, -2/11], 0.1050 x3 + 0.3292 <= x5 <= 0.1845 x3 + 0.4882: + Output symbolic bounds: + 0.1050 x3 - 0.0516 <= x8 <= 0.1898 x3 + 1.8796, + -0.1898 x3 - 0.8796 <= x9 <= -0.1050 x3 + 1.0516. + Upper branch, using x2: [-2/11, 2], 0.1050 x3 + 0.4737 <= x5 <= 0.1050 x3 + 0.6708: + Output symbolic bounds: + 0.1050 x3 + 0.1163 <= x8 <= 0.1050 x2 + 2.0516, + -0.1050 x3 - 1.0516 <= x9 <= -0.1050 x3 + 0.8837. + + Summing over all branches and output neurons: + Lower symbolic expression: -0.0848 x3 - 1.8665 >= -2.0361. + Upper symbolic expression: 0.0848 x3 + 5.8665 <= 6.0361. + + Final score = ( 6.0361 - (-2.0361) ) / 2 = 4.0361. 
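+
+          A short standalone arithmetic check of the two scores above, evaluating the
+          summed expressions at the endpoints of x2, x3 in [-2, 2]:
+          ---
+          lo_x4 = 0.4200 * -2 - 1.8705  # -2.7105
+          hi_x4 = 0.5896 * 2 + 5.8624   # 7.0416
+          lo_x5 = -0.0848 * 2 - 1.8665  # -2.0361
+          hi_x5 = 0.0848 * 2 + 5.8665   # 6.0361
+          print((hi_x4 - lo_x4) / 2)    # ~4.8761
+          print((hi_x5 - lo_x5) / 2)    # 4.0361
+          ---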
+ */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 4.0361 ); + + /* Calculating BBPS-based PMNR score of x8: + Symbolic bounds of output layer in terms of Layer 4: x8 <= x8 <= x8, x9 <= x9 <= x9. + Concretizing x9: x8 <= x8 <= x8, -1 <= x9 <= 1. + + Lower branch, using x6: [0.4483, 0.5], 0 <= x8 <= 0: + Output symbolic bounds: 0 <= x8 <= 0, -1 <= x9 <= 1. + Upper branch, using x6: [0.5, 1.5516], x6 - 0.5 <= x8 <= x6 + 0.5: + Output symbolic bounds: x6 - 0.5 <= x8 <= x6 + 0.5, -1 <= x9 <= 1. + + Summing over all branches and output neurons: + Lower symbolic expression: x6 - 2.5 >= -2.0517. + Upper symbolic expression: x6 + 2.5 <= 4.0517. + + Final score = ( 4.0517 - (-2.0517) ) / 2 = 3.0517. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 3.0517 ); + + /* Calculating BBPS-based PMNR score of x9: + Symbolic bounds of output layer in terms of Layer 4: x8 <= x8 <= x8, x9 <= x9 <= x9. + Concretizing x8: 0 <= x8 <= 2, x9 <= x9 <= x9. + + Lower branch, using x7: [-0.5516, -0.5], -1 <= x9 <= -1: + Output symbolic bounds: 0 <= x8 <= 2, -1 <= x9 <= -1. + Upper branch, using x7: [-0.5, 0.5516], x7 - 0.5 <= x9 <= x7 + 0.5: + Output symbolic bounds: 0 <= x8 <= 2, x7 - 0.5 <= x9 <= x7 + 0.5. + + Summing over all branches and output neurons: + Lower symbolic expression: x7 - 1.5 >= -2.0517. + Upper symbolic expression: x7 + 3.5 <= 4.0517. + + Final score = ( 4.0517 - (-2.0517) ) / 2 = 3.0517. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 4, 1 ), 3.0517 ); + } + + void test_symbolic_bound_maps_max_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTMax( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 2] + + Layers 1, 2, 3: + + x2 = x0 + x1 + x2.lb = x0 + x1 : [-2, 3] + x2.ub = x0 + x1 : [-2, 3] + + x3 = x0 - x1 + x3.lb = x0 - x1 : [-3, 2] + x3.ub = x0 - x1 : [-3, 2] + + Both ReLUs are undecided, bounds are concretized. + First ReLU: 3 = ub > -lb = 2, using lower ReLU coefficient of 1. + Upper coefficient (first ReLU): 3/( 3--2 ) = 3/5 = 0.6. + First ReLU: 2 = ub <= -lb = 3, using lower ReLU coefficient of 0. + Upper coefficient (second ReLU): 2/( 2--3 ) = 2/5 = 0.4 + + x2 <= x4 <= 0.6 x2 + 1.2 + x4.lb = x0 + x1 + x4.ub = 0.6 ( x0 + x1 ) + 1.2 = 0.6x0 + 0.6x1 + 1.2 + x4 range: [-2, 3] + + 0 <= x5 <= 0.4 x3 + 1.2 + x5.lb = 0 + x5.ub = 0.4 ( x0 - x1 ) + 1.2 = 0.4x0 + 0.4x1 + 1.2 + x5 range: [0, 2] + + Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub + Max inherits lower bound from x5, and its upper bound is constant 3. 
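+
+          A standalone recomputation (not part of the test) of the ReLU relaxation
+          coefficients quoted above; the resulting Max bounds continue below:
+          ---
+          def relu_relaxation(lb, ub):
+              # undecided ReLU on [lb, ub] with lb < 0 < ub:
+              # lower bound lambda_l * x, upper bound lambda_u * x + mu_u
+              lambda_l = 1.0 if ub > -lb else 0.0
+              lambda_u = ub / (ub - lb)
+              mu_u = -ub * lb / (ub - lb)
+              return lambda_l, lambda_u, mu_u
+
+          print(relu_relaxation(-2, 3))  # x4: (1.0, 0.6, 1.2)
+          print(relu_relaxation(-3, 2))  # x5: (0.0, 0.4, 1.2)
+          ---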
+ + x5 <= x6 <= 3 + x6.lb = 0 : [0, 0] + x6.ub = 3 : [3, 3] + x6 range: [0, 3] + + Layer 4: + + x7 = 2x6 + => 2x5 <= x7 <= 6 + x7.lb = 2 ( 0 ) = 0 : [0, 0] + x7.ub = 2 ( 3 ) = 6 : [6, 6] + x7 range: [0, 6] + */ + + List expectedBounds( { + Tightening( 2, -2, Tightening::LB ), + Tightening( 2, 3, Tightening::UB ), + Tightening( 3, -3, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -2, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), + Tightening( 6, 3, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), + Tightening( 7, 6, Tightening::UB ), + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (RELU): + x2 <= x4 <= 0.6 x2 + 1.2 + 0 <= x5 <= 0.4 x3 + 1.2 + + Layer 3 (MAX): + x5 <= x6 <= 6 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 4: + x7 <= x7 <= x7 + + Layer 3: + Using x7 = 2x6: + 2x6 <= x7 <= 2x6 + + Layer 2: + Using x5 <= x6 <= 3: + 2x5 <= x7 <= 6 + + Layer 1: + Using 0 <= x5 <= 0.4 x3 + 1.2: + 0 <= x7 <= 6 + + Layer 0: + 0 <= x7 <= 6 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 1, 0 } ), + Vector( { 0.6, 0.4 } ), + Vector( { 0, 0 } ), + Vector( { 1.2, 1.2 } ) ); + comparePredecessorSymbolicBounds( nlr, + 3, + Vector( { 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0 } ), + Vector( { 3 } ) ); + + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 2 } ), + Vector( { 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 0, 2 } ), + Vector( { 0, 0 } ), + Vector( { 0 } ), + Vector( { 6 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, 0 } ), + Vector( { 0, 0 } ), + Vector( { 0 } ), + Vector( { 6 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0, 0 } ), + Vector( { 0, 0 } ), + Vector( { 0 } ), + Vector( { 6 } ) ); + + // Non-fixed activation neurons: x4 (RELU), x5 (RELU), x6 (MAX). + compareNonfixedNeurons( nlr, + Set( { NLR::NeuronIndex( 2, 0 ), + NLR::NeuronIndex( 2, 1 ), + NLR::NeuronIndex( 3, 0 ) } ) ); + } + + void test_bbps_selection_max_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTMax( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + // Using branching point (x2, 0) for x4 (RELU). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 2, 0 ), + std::pair( { NLR::NeuronIndex( 1, 0 ), 0 } ) ); + + // Using branching point (x3, 0) for x5 (RELU). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 2, 1 ), + std::pair( { NLR::NeuronIndex( 1, 1 ), 0 } ) ); + + // Using branching point (x5, 16/101) for x6 (MAX). 
+ compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 3, 0 ), + std::pair( { NLR::NeuronIndex( 2, 1 ), 0.1584 } ) ); + + /* + Lower branch symbolic bounds: 0 <= x4 <= 0. + Upper branch symbolic bounds: x2 <= x4 <= x2. + */ + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 2, 0 ), + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + /* + Lower branch symbolic bounds: 0 <= x5 <= 0. + Upper branch symbolic bounds: x3 <= x5 <= x3. + */ + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 2, 1 ), + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + /* + Lower branch, x4: [-2, 3], x5: [0, 6/11]: + Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub + Max inherits lower bound from x5, and its upper bound is constant 3. + + Upper branch, x4: [-2, 3], x5: [6/11, 2]: + Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub + Max inherits lower bound from x5, and its upper bound is constant 3. + + Lower branch symbolic bounds: x5 <= x6 <= 3. + Upper branch symbolic bounds: x5 <= x6 <= 3. + */ + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 3, 0 ), + Vector( { 1, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ), + Vector( { 3, 3 } ) ); + + /* Calculating BBPS-based PMNR score of x4: + Symbolic bounds of output layer in terms of Layer 2: 2x5 <= x7 <= 6. + Concretizing x5: 0 <= x7 <= 6. + + Lower branch, using x2: [-2, 0], 0 <= x4 <= 0: + Output symbolic bounds 0 <= x7 <= 6. + Upper branch, using x2: [0, 3], x2 <= x4 <= x2: + Output symbolic bounds 0 <= x7 <= 6. + + Summing over all branches: + Lower symbolic expression: 0 >= 0. + Upper symbolic expression: 12 <= 12. + + Final score = ( 12 - 0 ) / 2 = 6. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 6 ); + + /* Calculating BBPS-based PMNR score of x5: + Symbolic bounds of output layer in terms of Layer 2: 2x5 <= x7 <= 6. + + Lower branch, using x3: [-3, 0], 0 <= x5 <= 0: + Output symbolic bounds 0 <= x7 <= 6. + Upper branch, using x3: [0, 2], x3 <= x5 <= x3: + Output symbolic bounds 2x3 <= x7 <= 6. + + Summing over all branches: + Lower symbolic expression: 2x3 >= -6. + Upper symbolic expression: 12 <= 12. + + Final score = ( 12 - (-6) ) / 2 = 9. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 9 ); + + /* Calculating BBPS-based PMNR score of x6: + Symbolic bounds of output layer in terms of Layer 3: 2x6 <= x7 <= 2x6. + + Lower branch, x5: [0, 6/11], using x5 <= x6 <= 3: + Output symbolic bounds 2x5 <= x7 <= 6. + Upper branch, x5: [6/11, 2], using x5 <= x6 <= 3: + Output symbolic bounds 2x5 <= x7 <= 6. + + Summing over all branches: + Lower symbolic expression: 4x5 >= 0. + Upper symbolic expression: 12 <= 12. + + Final score = ( 12 - 0 ) / 2 = 6.
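+
+          A quick standalone arithmetic check of the three scores above (x3 in [-3, 2],
+          x5 in [0, 2]):
+          ---
+          print((12 - 0) / 2)       # x4: 6
+          print((12 - 2 * -3) / 2)  # x5: 9, lower expression 2x3 minimized at x3 = -3
+          print((12 - 4 * 0) / 2)   # x6: 6, lower expression 4x5 minimized at x5 = 0
+          ---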
+ */ + comparePMNRScores( nlr, NLR::NeuronIndex( 3, 0 ), 6 ); + } + + void test_symbolic_bound_maps_max_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTMax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -3 ); + tableau.setUpperBound( 1, -2 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + /* + Input ranges: + + x0: [1, 2] + x1: [-3, -2] + + Layer 1: + + x2 = x0 + x1 + x2.lb = x0 + x1 : [-2, 0] + x2.ub = x0 + x1 : [-2, 0] + + x3 = x0 - x1 + x3.lb = x0 - x1 : [3, 5] + x3.ub = x0 - x1 : [3, 5] + + First ReLU is negative, bounds become constant 0 + Second ReLU is positive, bounds survive the activation + + 0 <= x4 <= 0 + x4: all set to 0 + + x3 <= x5 <= x3 + x5.lb = x0 - x1 : [3, 5] + x5.ub = x0 - x1 : [3, 5] + + Max is fixed because x5.lb > x4.ub, it inherits x5's bounds + + x5 <= x6 <= x5 + => x3 <= x6 <= x5 + x6.lb = x0 - x1 : [3, 5] + x6.ub = x0 - x1 : [3, 5] + + Layer 3: + + x7 = 2x6 + => x7 = 2x5 = 2x3 = 2x0 - 2x1 + x7.lb = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] + x7.ub = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] + */ + + List expectedBounds( { + Tightening( 2, -2, Tightening::LB ), + Tightening( 2, 0, Tightening::UB ), + Tightening( 3, 3, Tightening::LB ), + Tightening( 3, 5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 3, Tightening::LB ), + Tightening( 5, 5, Tightening::UB ), + Tightening( 6, 3, Tightening::LB ), + Tightening( 6, 5, Tightening::UB ), + Tightening( 7, 6, Tightening::LB ), + Tightening( 7, 10, Tightening::UB ), + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (RELU): + 0 <= x4 <= 0 + x3 <= x5 <= x3 + + Layer 3 (MAX): + x5 <= x6 <= x5 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 4: + x7 <= x7 <= x7 + + Layer 3: + Using x7 = 2x6: + 2x6 <= x7 <= 2x6 + + Layer 2: + Using x5 <= x6 <= x5: + 2x5 <= x7 <= 2x5 + + Layer 1: + Using x3 <= x5 <= x3: + 2x3 <= x7 <= 2x3 + + Layer 0: + Using x3 = x0 - x1 + 2x0 - 2x1 <= x7 <= 2x0 - 2x1 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + comparePredecessorSymbolicBounds( nlr, + 3, + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 2 } ), + Vector( { 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 0, 2 } ), + Vector( { 0, 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, 2 } ), + Vector( { 0, 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 2, -2 } ), + Vector( { 2, -2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + + // Non-fixed activation neurons: None. 
+ compareNonfixedNeurons( nlr, Set( {} ) ); + } + + void test_symbolic_bound_maps_softmax1() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + } + + void test_symbolic_bound_maps_softmax2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + { + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + + Layer 1: + + x3 = x0 - x1 + x2 + 1 + x3.lb = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3.ub = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3 range: [ 1.999999, 2.000002 ] + + x4 = -x0 + x1 + x2 + 2 + x4.lb = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4.ub = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4 range: [ 2.999999, 3.000002 ] + + x5 = -x0 - x1 - x2 + 3 + x5.lb = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5.ub = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5 range: [ -0.000003, 0 ] + */ + + unsigned size = nlr.getLayer( 2 )->getSize(); + Vector sourceLbs = { 1.999899, 2.999899, -0.000003 }; + Vector sourceUbs = { 2.000102, 3.000102, 0.0001 }; + Vector sourceMids = { 2.0000005, 3.0000005, -0.0000015 }; + Vector targetLbs( size, 0 ); + Vector targetUbs( size, 0 ); + Vector symbolicLb( size * size, 0 ); + Vector symbolicUb( size * size, 0 ); + Vector symbolicLowerBias( size, 0 ); + Vector symbolicUpperBias( size, 0 ); + for ( unsigned i = 0; i < size; ++i ) + { + targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i ); + targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i ); + } + for ( unsigned i = 0; i < size; ++i ) + { + symbolicLowerBias[i] = + NLR::Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, i ); // Using lse2 + symbolicUpperBias[i] = + NLR::Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, i ); + for ( unsigned j = 0; j < size; ++j ) + { + symbolicLb[size * j + i] = + NLR::Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, i, j ); + symbolicUb[size * j + i] = + NLR::Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, i, j ); + symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j]; + symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j]; + } + } + TS_ASSERT( compareVectors( targetLbs, Vector( { 0.2595, 0.7054, 0.0351 } ) ) ); + TS_ASSERT( compareVectors( targetUbs, Vector( { 0.2595, 0.7054, 0.0351 } ) ) ); + TS_ASSERT( compareVectors( symbolicLb, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ) ) ); 
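+
+        /*
+          Since the input box here is numerically a single point, the coefficient matrix
+          asserted here coincides (to the precision shown) with the softmax Jacobian at
+          the midpoint, J[i][j] = s_i * (delta_ij - s_j). A standalone sketch (plain
+          Python, not part of the test) recovering it:
+          ---
+          from math import exp
+
+          mid = [2.0, 3.0, 0.0]  # midpoints of x3, x4, x5
+          es = [exp(z) for z in mid]
+          s = [v / sum(es) for v in es]  # ~(0.2595, 0.7054, 0.0351)
+          jac = [[s[i] * ((i == j) - s[j]) for j in range(3)] for i in range(3)]
+          bias = [s[i] - sum(jac[i][j] * mid[j] for j in range(3)) for i in range(3)]
+          print(jac)   # rows ~(0.1922, -0.1830, -0.0091), (-0.1830, 0.2078, -0.0248), ...
+          print(bias)  # ~(0.4243, 0.4480, 0.1277)
+          ---
+        */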
+ TS_ASSERT( compareVectors( symbolicUb, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ) ) ); + TS_ASSERT( + compareVectors( symbolicLowerBias, Vector( { 0.4243, 0.4481, 0.1277 } ) ) ); + TS_ASSERT( + compareVectors( symbolicUpperBias, Vector( { 0.4243, 0.4480, 0.1277 } ) ) ); + + /* + Layer 2: + + 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 + x6.lb = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 + x6.ub = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 + x6 range: [ 0.2595, 0.2595 ] + + -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4480 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 + x7.lb = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6062 + x7.ub = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6063 + x7 range: [ 0.7054, 0.7054 ] + + -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 + x8.lb = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707 + x8.ub = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707 + x8 range: [ 0.0351, 0.0351 ] + + Layer 3: + + x9 = x6 + x7 + x8 + => x9 = ( 0.1922 - 0.1830 - 0.0091 ) x3 + ( -0.1830 + 0.2078 - 0.0248 ) x4 + ( + -0.0091 - 0.0248 + 0.0339 ) x5 + ( 0.4243 + 0.4481 + 0.1277 ) + + => x9 = 0.0001 x3 + 0 x4 + 0 x5 + 1.0001 + => ( Up to rounding ) 1 <= x9 <= 1. + x9.lb = 1 + x9.ub = 1 + x9 range: [ 1, 1 ] + + x10 = - x6 - x7 - x8 + => x10 = - ( 0.1922 - 0.1830 - 0.0091 ) x3 - ( -0.1830 + 0.2078 - 0.0248 ) x4 - ( + -0.0091 - 0.0248 + 0.0339 ) x5 - ( 0.4243 + 0.4481 + 0.1277 ) + + => x10 = - 0.0001 x3 - 0.0000 x4 - 0.0000 x5 - 1.0001 + => ( Up to rounding ) 1 <= x10 <= 1. + x10.lb = 1 + x10.ub = 1 + x10 range: [ -1, -1 ] + */ + + List expectedBounds( { Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, 0.2595, Tightening::LB ), + Tightening( 6, 0.2595, Tightening::UB ), + Tightening( 7, 0.7054, Tightening::LB ), + Tightening( 7, 0.7054, Tightening::UB ), + Tightening( 8, 0.0351, Tightening::LB ), + Tightening( 8, 0.0351, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), + Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), + Tightening( 10, -1, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (SOFTMAX): + 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 + -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 + -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x9 <= x9 <= x9 + x10 <= x10 <= x10 + + Layer 2: + Using x9 = x6 + x7 + x8, x10 = -x6 - x7 - x8: + x6 + x7 + x8 <= x9 <= x6 + x7 + x8 + -x6 - x7 - x8 <= x10 <= -x6 - x7 - x8 + + Layer 1: + Using + 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243. + -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + + 0.4481. 
-0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + + 0.1277: 1 <= x9 <= 1 -1 <= x10 <= -1 + + Layer 0: + 1 <= x9 <= 1 + -1 <= x10 <= -1 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ), + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ), + Vector( { 0.4243, 0.4481, 0.1277 } ), + Vector( { 0.4243, 0.4480, 0.1277 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1, 1, -1, 1, -1 } ), + Vector( { 1, -1, 1, -1, 1, -1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 1, -1 } ), + Vector( { 1, -1 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 1, -1 } ), + Vector( { 1, -1 } ) ); + + // Non-fixed activation neurons: x6 (SOFTMAX), x7 (SOFTMAX), x8 (SOFTMAX). + compareNonfixedNeurons( nlr, + Set( { NLR::NeuronIndex( 2, 0 ), + NLR::NeuronIndex( 2, 1 ), + NLR::NeuronIndex( 2, 2 ) } ) ); + } + { + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "er" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + + Layer 1: + + x3 = x0 - x1 + x2 + 1 + x3.lb = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3.ub = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3 range: [ 1.999999, 2.000002 ] + + x4 = -x0 + x1 + x2 + 2 + x4.lb = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4.ub = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4 range: [ 2.999999, 3.000002 ] + + x5 = -x0 - x1 - x2 + 3 + x5.lb = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5.ub = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5 range: [ -0.000003, 0 ] + */ + + unsigned size = nlr.getLayer( 2 )->getSize(); + Vector sourceLbs = { 1.999899, 2.999899, -0.000003 }; + Vector sourceUbs = { 2.000102, 3.000102, 0.0001 }; + Vector sourceMids = { 2.0000005, 3.0000005, -0.0000015 }; + Vector targetLbs( size, 0 ); + Vector targetUbs( size, 0 ); + Vector symbolicLb( size * size, 0 ); + Vector symbolicUb( size * size, 0 ); + Vector symbolicLowerBias( size, 0 ); + Vector symbolicUpperBias( size, 0 ); + for ( unsigned i = 0; i < size; ++i ) + { + targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i ); + targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i ); + } + for ( unsigned i = 0; i < size; ++i ) + { + symbolicLowerBias[i] = + NLR::Layer::ERLowerBound( sourceMids, sourceLbs, sourceUbs, i ); // Using er + symbolicUpperBias[i] = + NLR::Layer::ERUpperBound( sourceMids, targetLbs, targetUbs, i ); + for ( unsigned j = 0; j < size; ++j ) + { + symbolicLb[size * j + i] = + NLR::Layer::dERLowerBound( sourceMids, sourceLbs, sourceUbs, i, j ); + symbolicUb[size 
* j + i] = + NLR::Layer::dERUpperBound( sourceMids, targetLbs, targetUbs, i, j ); + symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j]; + symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j]; + } + } + TS_ASSERT( compareVectors( targetLbs, Vector( { 0.2595, 0.7054, 0.0351 } ) ) ); + TS_ASSERT( compareVectors( targetUbs, Vector( { 0.2595, 0.7054, 0.0351 } ) ) ); + TS_ASSERT( compareVectors( symbolicLb, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ) ) ); + TS_ASSERT( compareVectors( symbolicUb, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ) ) ); + TS_ASSERT( + compareVectors( symbolicLowerBias, Vector( { 0.4243, 0.4481, 0.1277 } ) ) ); + TS_ASSERT( + compareVectors( symbolicUpperBias, Vector( { 0.4243, 0.4480, 0.1277 } ) ) ); + + /* + Layer 2: + + 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 + x6.lb = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 + x6.ub = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 + x6 range: [ 0.2595, 0.2595 ] + + -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4480 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 + x7.lb = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6062 + x7.ub = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6063 + x7 range: [ 0.7054, 0.7054 ] + + -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 + x8.lb = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707 + x8.ub = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707 + x8 range: [ 0.0351, 0.0351 ] + + Layer 3: + + x9 = x6 + x7 + x8 + => x9 = ( 0.1922 - 0.1830 - 0.0091 ) x3 + ( -0.1830 + 0.2078 - 0.0248 ) x4 + ( + -0.0091 - 0.0248 + 0.0339 ) x5 + ( 0.4243 + 0.4481 + 0.1277 ) + + => x9 = 0.0001 x3 + 0 x4 + 0 x5 + 1.0001 + => ( Up to rounding ) 1 <= x9 <= 1. + x9.lb = 1 + x9.ub = 1 + x9 range: [ 1, 1 ] + + x10 = - x6 - x7 - x8 + => x10 = - ( 0.1922 - 0.1830 - 0.0091 ) x3 - ( -0.1830 + 0.2078 - 0.0248 ) x4 - ( + -0.0091 - 0.0248 + 0.0339 ) x5 - ( 0.4243 + 0.4481 + 0.1277 ) + + => x10 = - 0.0001 x3 - 0.0000 x4 - 0.0000 x5 - 1.0001 + => ( Up to rounding ) 1 <= x10 <= 1. 
+ x10.lb = 1 + x10.ub = 1 + x10 range: [ -1, -1 ] + */ + List expectedBounds( { Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, 0.2595, Tightening::LB ), + Tightening( 6, 0.2595, Tightening::UB ), + Tightening( 7, 0.7054, Tightening::LB ), + Tightening( 7, 0.7054, Tightening::UB ), + Tightening( 8, 0.0351, Tightening::LB ), + Tightening( 8, 0.0351, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), + Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), + Tightening( 10, -1, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (SOFTMAX): + 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 + -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 + -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x9 <= x9 <= x9 + x10 <= x10 <= x10 + + Layer 2: + Using x9 = x6 + x7 + x8, x10 = -x6 - x7 - x8: + x6 + x7 + x8 <= x9 <= x6 + x7 + x8 + -x6 - x7 - x8 <= x10 <= -x6 - x7 - x8 + + Layer 1: + Using + 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243. + -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + + 0.4481. -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + + 0.1277: 1 <= x9 <= 1 -1 <= x10 <= -1 + + Layer 0: + 1 <= x9 <= 1 + -1 <= x10 <= -1 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ), + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ), + Vector( { 0.4243, 0.4481, 0.1277 } ), + Vector( { 0.4243, 0.4480, 0.1277 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1, 1, -1, 1, -1 } ), + Vector( { 1, -1, 1, -1, 1, -1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 1, -1 } ), + Vector( { 1, -1 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 1, -1 } ), + Vector( { 1, -1 } ) ); + + // Non-fixed activation neurons: x6 (SOFTMAX), x7 (SOFTMAX), x8 (SOFTMAX). 
+ compareNonfixedNeurons( nlr, + Set( { NLR::NeuronIndex( 2, 0 ), + NLR::NeuronIndex( 2, 1 ), + NLR::NeuronIndex( 2, 2 ) } ) ); + } + } + + void test_bbps_selection_softmax2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + // Using branching point (x3, 2) for x6 (SOFTMAX). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 2, 0 ), + std::pair( { NLR::NeuronIndex( 1, 0 ), 2 } ) ); + + // Using branching point (x4, 3) for x7 (SOFTMAX). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 2, 1 ), + std::pair( { NLR::NeuronIndex( 1, 1 ), 3 } ) ); + + // Using branching point (x5, 0) for x8 (SOFTMAX). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 2, 2 ), + std::pair( { NLR::NeuronIndex( 1, 2 ), 0 } ) ); + + /* + Symbolic bounds of x6 in terms of predecessor (for both branches, since range(x3) < + 0.0001): 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + + 0.4243. Concretizing x4, x5: 0.1922 x3 - 0.1248 <= x6 <= 0.1922 x3 - 0.1248. + + Symbolic bounds of x7 in terms of predecessor (for both branches, since range(x4) < + 0.0001): -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + + 0.4481. Concretizing x3, x5: 0.2078 x4 + 0.0819 <= x7 <= 0.2078 x4 + 0.0819. + + Symbolic bounds of x8 in terms of predecessor (for both branches, since range(x5) < + 0.0001): -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= 0.0091 x3 - 0.0248 x4 + 0.0339 x5 + + 0.1277. Concretizing x3, x4: 0.0339 x5 + 0.0351 <= x8 <= 0.0339 x5 + 0.0351. + */ + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 2, 0 ), + Vector( { 0.1922, 0.1922 } ), + Vector( { 0.1922, 0.1922 } ), + Vector( { -0.1248, -0.1248 } ), + Vector( { -0.1248, -0.1248 } ) ); + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 2, 1 ), + Vector( { 0.2078, 0.2078 } ), + Vector( { 0.2078, 0.2078 } ), + Vector( { 0.0819, 0.0819 } ), + Vector( { 0.0819, 0.0819 } ) ); + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 2, 2 ), + Vector( { 0.0339, 0.0339 } ), + Vector( { 0.0339, 0.0339 } ), + Vector( { 0.0351, 0.0351 } ), + Vector( { 0.0351, 0.0351 } ) ); + + /* + Calculating BBPS-based PMNR score of x6, x7, x8: + Symbolic bounds of output layer in terms of Layer 2: + x6 + x7 + x8 <= x9 <= x6 + x7 + x8 + -x6 - x7 - x8 <= x10 <= -x6 - x7 - x8 + + Because the lower/upper symbolic bounds for output layer are equal (up to ~10^-6), + and lower/upper predecessor symbolic bounds for both branches are equal, the symbolic + bounds for every output neuron, every nonfixed neuron and branch are equal to DeepPoly. + Consequently, the BBPS-based PMNR scores for all neurons equal 0. 
+ */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 0 ); + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 0 ); + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 2 ), 0 ); + } + + void test_symbolic_bound_maps_softmax3() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax2( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.00001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.00001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.00001 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + + Layer 1: + + x3 = x0 - x1 + x2 + 1 + x3.lb = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3.ub = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3 range: [ 1.999999, 2.000002 ] + + x4 = -x0 + x1 + x2 + 2 + x4.lb = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4.ub = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4 range: [ 2.999999, 3.000002 ] + + x5 = -x0 - x1 - x2 + 3 + x5.lb = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5.ub = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5 range: [ -0.000003, 0 ] + + x6 = -x0 - x1 - x2 + 2 + x6.lb = -x0 - x1 - x2 + 2 : [ -1.000003, -1 ] + x6.ub = -x0 - x1 - x2 + 2 : [ -1.000003, -1 ] + x6 range: [ -1.000003, -1 ] + + x7 = -x0 - x1 - x2 + 1 + x7.lb = -x0 - x1 - x2 + 1 : [ -2.000003, -2 ] + x7.ub = -x0 - x1 - x2 + 1 : [ -2.000003, -2 ] + x7 range: [ -2.000003, -2 ] + */ + + // First Sigmoid: x8 x10 x12 = softmax( x3, x5, x7 ). 
+ unsigned size = nlr.getLayer( 2 )->getActivationSources( 0 ).size(); + Vector sourceLbs = { 1.999899, -0.000003, -2.000103 }; + Vector sourceUbs = { 2.000102, 0.0001, -1.999 }; + Vector sourceMids = { 2.0000005, -0.0000015, -2.0000015 }; + Vector targetLbs( size, 0 ); + Vector targetUbs( size, 0 ); + Vector symbolicLb( size * size, 0 ); + Vector symbolicUb( size * size, 0 ); + Vector symbolicLowerBias( size, 0 ); + Vector symbolicUpperBias( size, 0 ); + for ( unsigned i = 0; i < size; ++i ) + { + targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i ); + targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i ); + } + for ( unsigned i = 0; i < size; ++i ) + { + symbolicLowerBias[i] = + NLR::Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, i ); // Using lse2 + symbolicUpperBias[i] = NLR::Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, i ); + for ( unsigned j = 0; j < size; ++j ) + { + symbolicLb[size * j + i] = + NLR::Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, i, j ); + symbolicUb[size * j + i] = + NLR::Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, i, j ); + symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j]; + symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j]; + } + } + TS_ASSERT( compareVectors( targetLbs, Vector( { 0.8668, 0.1173, 0.0159 } ) ) ); + TS_ASSERT( compareVectors( targetUbs, Vector( { 0.8668, 0.1173, 0.0159 } ) ) ); + TS_ASSERT( compareVectors( symbolicLb, + Vector( { 0.1155, + -0.1017, + -0.0138, + -0.1017, + 0.1035, + -0.0019, + -0.0138, + -0.0019, + 0.0156 } ) ) ); + TS_ASSERT( compareVectors( symbolicUb, + Vector( { 0.1155, + -0.1017, + -0.0138, + -0.1017, + 0.1036, + -0.0019, + -0.0138, + -0.0019, + 0.0156 } ) ) ); + TS_ASSERT( + compareVectors( symbolicLowerBias, Vector( { 0.6084, 0.3170, 0.0747 } ) ) ); + TS_ASSERT( + compareVectors( symbolicUpperBias, Vector( { 0.6084, 0.3170, 0.0747 } ) ) ); + + // Second Sigmoid: x9 x11 = softmax( x4, x6 ). 
+ size = nlr.getLayer( 2 )->getActivationSources( 1 ).size(); + sourceLbs = Vector( { 2.999899, -1.000103 } ); + sourceUbs = Vector( { 3.000102, -0.9999 } ); + sourceMids = Vector( { 3.0000005, -1.0000015 } ); + targetLbs = Vector( size, 0 ); + targetUbs = Vector( size, 0 ); + symbolicLb = Vector( size * size, 0 ); + symbolicUb = Vector( size * size, 0 ); + symbolicLowerBias = Vector( size, 0 ); + symbolicUpperBias = Vector( size, 0 ); + for ( unsigned i = 0; i < size; ++i ) + { + targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i ); + targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i ); + } + for ( unsigned i = 0; i < size; ++i ) + { + symbolicLowerBias[i] = + NLR::Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, i ); // Using lse2 + symbolicUpperBias[i] = NLR::Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, i ); + for ( unsigned j = 0; j < size; ++j ) + { + symbolicLb[size * j + i] = + NLR::Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, i, j ); + symbolicUb[size * j + i] = + NLR::Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, i, j ); + symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j]; + symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j]; + } + } + TS_ASSERT( compareVectors( targetLbs, Vector( { 0.9820, 0.0180 } ) ) ); + TS_ASSERT( compareVectors( targetUbs, Vector( { 0.9820, 0.0180 } ) ) ); + TS_ASSERT( + compareVectors( symbolicLb, Vector( { 0.0177, -0.0177, -0.0177, 0.0177 } ) ) ); + TS_ASSERT( + compareVectors( symbolicUb, Vector( { 0.0177, -0.0177, -0.0177, 0.0177 } ) ) ); + TS_ASSERT( compareVectors( symbolicLowerBias, Vector( { 0.9114, 0.0886 } ) ) ); + TS_ASSERT( compareVectors( symbolicUpperBias, Vector( { 0.9114, 0.0886 } ) ) ); + + /* + Layer 2: + + First Sigmoid: x8 x10 x12 = softmax( x3, x5, x7 ). + 0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 + x8.lb = 0.2310 x0 + 0.0001 x1 + 0.2310 x2 + 0.4051 + x8.ub = 0.2310 x0 + 0.0000 x1 + 0.2310 x2 + 0.4050 + x8 range: [ 0.8668, 0.8668 ] + + -0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + + 0.3170 x10.lb = -0.2033 x0 + 0.0001 x1 - 0.2033 x2 + 0.5239 x10.ub = -0.2033 x0 + 0.0000 x1 - + 0.2033 x2 + 0.5241 x10 range: [ 0.1173, 0.1173 ] + + -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + + 0.0747 x12.lb = -0.0275 x0 + 0.0001 x1 - 0.0275 x2 + 0.0708 x12.ub = -0.0275 x0 + 0.0001 x1 - + 0.0275 x2 + 0.0708 x12 range: [ 0.0159, 0.0159 ] + + Second Sigmoid: x9 x11 = softmax( x4, x6 ). + 0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114 + x9.lb = 0 x0 + 0.0354 x1 + 0.0354 x2 + 0.9114 + x9.ub = 0 x0 + 0.0354 x1 + 0.0354 x2 + 0.9114 + x9 range: [ 0.9820, 0.0180 ] + + -0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886 + x11.lb = 0 x0 - 0.0354 x1 - 0.0354 x2 + 0.0886 + x11.ub = 0 x0 - 0.0354 x1 - 0.0354 x2 + 0.0886 + x11 range: [ 0.9820, 0.0180 ] + + Layer 3: + + x13 = x8 + x10 + x12 + => x13 = ( 0.1155 - 0.1017 - 0.0138 ) x3 + ( -0.1017 + 0.1035 - 0.0019 ) x5 + + ( -0.0138 - 0.0019 + 0.0156 ) x7 + ( 0.6084 + 0.3170 + 0.0747 ) + + => x13 = 0 x3 - 0.0001 x5 - 0.0001 x7 + 1.0001 + => ( Up to rounding ) 1 <= x13 <= 1. 
+ x13.lb = 1 + x13.ub = 1 + x13 range: [ 1, 1 ] + + x14 = - x8 - x10 - x12 + => x14 = - ( 0.1155 - 0.1017 - 0.0138 ) x3 - ( -0.1017 + 0.1035 - 0.0019 ) x5 + - ( -0.0138 - 0.0019 + 0.0156 ) x7 - ( 0.6084 + 0.3170 + 0.0747 ) + + => x14 = 0 x3 + 0.0001 x5 + 0.0001 x7 - 1.0001 + => ( Up to rounding ) -1 <= x14 <= -1. + x14.lb = -1 + x14.ub = -1 + x14 range: [ -1, -1 ] + + x15 = x9 + x11 + => x15 = ( 0.0177 - 0.0177 ) x4 + ( -0.0177 + 0.0177 ) x6 + ( 0.9114 + 0.0886 ) + + => x15 = 0 x4 + 0 x6 + 1 + => ( Up to rounding ) 1 <= x15 <= 1. + x15.lb = 1 + x15.ub = 1 + x15 range: [ 1, 1 ] + + x16 = - x9 - x11 + => x16 = - ( 0.0177 - 0.0177 ) x4 - ( -0.0177 + 0.0177 ) x6 - ( 0.9114 + 0.0886 ) + + => x16 = 0 x4 + 0 x6 - 1 + => ( Up to rounding ) -1 <= x16 <= -1. + x16.lb = -1 + x16.ub = -1 + x16 range: [ -1, -1 ] + */ + + List expectedBounds( { + Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ), + Tightening( 8, 0.86681, Tightening::LB ), Tightening( 8, 0.86682, Tightening::UB ), + Tightening( 9, 0.98201, Tightening::LB ), Tightening( 9, 0.98201, Tightening::UB ), + Tightening( 10, 0.11731, Tightening::LB ), Tightening( 10, 0.11731, Tightening::UB ), + Tightening( 11, 0.017985, Tightening::LB ), Tightening( 11, 0.017986, Tightening::UB ), + Tightening( 12, 0.015875, Tightening::LB ), Tightening( 12, 0.015876, Tightening::UB ), + Tightening( 13, 1, Tightening::LB ), Tightening( 13, 1, Tightening::UB ), + Tightening( 14, -1, Tightening::LB ), Tightening( 14, -1, Tightening::UB ), + Tightening( 15, 1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, -1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (SOFTMAX): + 0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 + 0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114 + -0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + + 0.3170 -0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886 -0.0138 x3 - + 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x13 <= x13 <= x13 + x14 <= x14 <= x14 + x15 <= x15 <= x15 + x16 <= x16 <= x16 + + Layer 2: + Using x13 = x8 + x10 + x12, x14 = -x8 - x10 - x12, x15 = x9 + x11, x16 = -x9 - x11: + x8 + x10 + x12 <= x13 <= x8 + x10 + x12 + -x8 - x10 - x12 <= x14 <= -x8 - x10 - x12 + x9 + x11 <= x15 <= x9 + x11 + -x9 - x11 <= x16 <= -x9 - x11 + + Layer 1: + Using + 0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 + 0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114 + -0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + + 0.3170 -0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886 -0.0138 x3 - + 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 + 1 <= 
x13 <= 1 + -1 <= x14 <= -1 + 1 <= x15 <= 1 + -1 <= x16 <= -1 + + Layer 0: + 1 <= x13 <= 1 + -1 <= x14 <= -1 + 1 <= x15 <= 1 + -1 <= x16 <= -1 + */ + comparePredecessorSymbolicBounds( + nlr, + 2, + Vector( { 0.1155, + 0.0177, + -0.1017, + -0.0177, + -0.0138, + -0.1017, + -0.0177, + 0.1035, + 0.0177, + -0.0019, + -0.0138, + 0.0000, + -0.0019, + 0.0000, + 0.0156 } ), + Vector( { 0.1155, + 0.0177, + -0.1017, + -0.0177, + -0.0138, + -0.1017, + -0.0177, + 0.1036, + 0.0177, + -0.0019, + -0.0138, + 0.0000, + -0.0019, + 0.0000, + 0.0156 } ), + Vector( { 0.6084, 0.9114, 0.3170, 0.0886, 0.0747 } ), + Vector( { 0.6084, 0.9114, 0.3170, 0.0886, 0.0747 } ) ); + + compareOutputSymbolicBounds( + nlr, + 3, + Vector( { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 } ), + Vector( { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 } ), + Vector( { 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0 } ) ); + compareOutputSymbolicBounds( + nlr, + 2, + Vector( { 1, -1, 0, 0, 0, 0, 1, -1, 1, -1, 0, 0, 0, 0, 1, -1, 1, -1, 0, 0 } ), + Vector( { 1, -1, 0, 0, 0, 0, 1, -1, 1, -1, 0, 0, 0, 0, 1, -1, 1, -1, 0, 0 } ), + Vector( { 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( 20, 0 ), + Vector( 20, 0 ), + Vector( { 1, -1, 1, -1 } ), + Vector( { 1, -1, 1, -1 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( 12, 0 ), + Vector( 12, 0 ), + Vector( { 1, -1, 1, -1 } ), + Vector( { 1, -1, 1, -1 } ) ); + + // Non-fixed activation neurons: x8 (SOFTMAX), x9 (SOFTMAX), x10 (SOFTMAX), x11 (SOFTMAX), + // x12 (SOFTMAX). + compareNonfixedNeurons( nlr, + Set( { NLR::NeuronIndex( 2, 0 ), + NLR::NeuronIndex( 2, 1 ), + NLR::NeuronIndex( 2, 2 ), + NLR::NeuronIndex( 2, 3 ), + NLR::NeuronIndex( 2, 4 ) } ) ); + } + + void test_bbps_selection_softmax3() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax2( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.00001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.00001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.00001 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + // Using branching point (x3, 2) for x8 (SOFTMAX). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 2, 0 ), + std::pair( { NLR::NeuronIndex( 1, 0 ), 2 } ) ); + + // Using branching point (x4, 3) for x9 (SOFTMAX). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 2, 1 ), + std::pair( { NLR::NeuronIndex( 1, 1 ), 3 } ) ); + + // Using branching point (x5, 0) for x10 (SOFTMAX). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 2, 2 ), + std::pair( { NLR::NeuronIndex( 1, 2 ), 0 } ) ); + + // Using branching point (x6, -1) for x11 (SOFTMAX). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 2, 3 ), + std::pair( { NLR::NeuronIndex( 1, 3 ), -1 } ) ); + + // Using branching point (x7, -2) for x12 (SOFTMAX). 
+        compareBBPSBranchingPoints(
+            nlr,
+            NLR::NeuronIndex( 2, 4 ),
+            std::pair( { NLR::NeuronIndex( 1, 4 ), -2 } ) );
+
+        /*
+          Symbolic bounds of x8 in terms of predecessor (for both branches, since range(x3) < 0.0001):
+          0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084.
+          Concretizing x5, x7: 0.1155 x3 + 0.6360 <= x8 <= 0.1155 x3 + 0.6360.
+
+          Symbolic bounds of x9 in terms of predecessor (for both branches, since range(x4) < 0.0001):
+          0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114.
+          Concretizing x6: 0.0177 x4 + 0.9291 <= x9 <= 0.0177 x4 + 0.9291.
+
+          Symbolic bounds of x10 in terms of predecessor (for both branches, since range(x5) < 0.0001):
+          -0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + 0.3170.
+          Concretizing x3, x7: 0.1035 x5 + 0.1174 <= x10 <= 0.1036 x5 + 0.1174.
+
+          Symbolic bounds of x11 in terms of predecessor (for both branches, since range(x6) < 0.0001):
+          -0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886.
+          Concretizing x4: 0.0177 x6 + 0.0356 <= x11 <= 0.0177 x6 + 0.0356.
+
+          Symbolic bounds of x12 in terms of predecessor (for both branches, since range(x7) < 0.0001):
+          -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747.
+          Concretizing x3, x5: 0.0156 x7 + 0.0471 <= x12 <= 0.0156 x7 + 0.0471.
+        */
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 2, 0 ),
+                                     Vector( { 0.1155, 0.1155 } ),
+                                     Vector( { 0.1155, 0.1155 } ),
+                                     Vector( { 0.6360, 0.6360 } ),
+                                     Vector( { 0.6360, 0.6360 } ) );
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 2, 1 ),
+                                     Vector( { 0.0177, 0.0177 } ),
+                                     Vector( { 0.0177, 0.0177 } ),
+                                     Vector( { 0.9291, 0.9291 } ),
+                                     Vector( { 0.9291, 0.9291 } ) );
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 2, 2 ),
+                                     Vector( { 0.1035, 0.1035 } ),
+                                     Vector( { 0.1036, 0.1036 } ),
+                                     Vector( { 0.1174, 0.1174 } ),
+                                     Vector( { 0.1174, 0.1174 } ) );
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 2, 3 ),
+                                     Vector( { 0.0177, 0.0177 } ),
+                                     Vector( { 0.0177, 0.0177 } ),
+                                     Vector( { 0.0356, 0.0356 } ),
+                                     Vector( { 0.0356, 0.0356 } ) );
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 2, 4 ),
+                                     Vector( { 0.0156, 0.0156 } ),
+                                     Vector( { 0.0156, 0.0156 } ),
+                                     Vector( { 0.0471, 0.0471 } ),
+                                     Vector( { 0.0471, 0.0471 } ) );
+
+        /*
+          Calculating the BBPS-based PMNR score of x8, x9, x10, x11, x12:
+          Symbolic bounds of output layer in terms of Layer 2:
+          x8 + x10 + x12 <= x13 <= x8 + x10 + x12
+          -x8 - x10 - x12 <= x14 <= -x8 - x10 - x12
+          x9 + x11 <= x15 <= x9 + x11
+          -x9 - x11 <= x16 <= -x9 - x11
+
+          Because the lower/upper symbolic bounds of the output layer are equal (up to ~10^-6),
+          and the lower/upper predecessor symbolic bounds of both branches are equal, the concrete
+          bounds for every output neuron, every non-fixed neuron and every branch are equal to
+          DeepPoly's. Consequently, the BBPS-based PMNR scores of all neurons equal 0.
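+
+          ( Equivalently, in terms of the score computation spelled out in
+            test_bbps_selection_bilinear below: summing the concretized branch bounds per neuron
+            and dividing the resulting upper/lower gap by the number of branches gives
+            ( ub_sum - lb_sum ) / 2, and here ub_sum = lb_sum for every candidate neuron. )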
+ */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 0 ); + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 0 ); + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 2 ), 0 ); + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 3 ), 0 ); + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 4 ), 0 ); + } + + void test_symbolic_bound_maps_bilinear() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTBilinear( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -2 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + /* + Input ranges: + + x0: [1, 2] + x1: [-2, 1] + + Layers 1, 2: + + x2 = x0 - 2x1 + x2.lb = x0 - 2x1 : [-1, 6] + x2.ub = x0 - 2x1 : [-1, 6] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [-1, 3] + x3.ub = x0 + x1 : [-1, 3] + + Coefficients for bilinear layer: + Lower bound: + alpha_l = x3.lb = -1 + beta = x2.lb = -1 + gamma_l = -x2.lb x3.lb = --1 * -1 = -1 + + Upper bound: + alpha_u = x3.ub = 3 + beta = x2.lb = -1 + gamma_u = -x2.lb x3.ub = --1 * 3 = 3 + + -x2 - x3 - 1 <= x4 <= 3x2 - x3 + 3 + x4.lb = -1 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + -1 = -2x0 + x1 - 1 : [-7, -2] + x4.ub = 3 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + 3 = 2x0 - 7x1 + 3 : [0, 21] + x4 range: [-6, 18] + + Layer 3: + + x5 = -x4 + => -3x2 + x3 - 3 <= x4 <= x2 + x3 + 1 + x5.lb = -1 ( 2x0 - 5x1 + 3 ) = -2x0 + 7x1 - 3 : [-21, 0] + x5.ub = -1 ( -2x0 + x1 - 1 ) = 2x0 - x1 + 1 : [2, 7] + x5 range: [-18, 6] + */ + + List expectedBounds( { Tightening( 2, -1, Tightening::LB ), + Tightening( 2, 6, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 3, Tightening::UB ), + Tightening( 4, -6, Tightening::LB ), + Tightening( 4, 18, Tightening::UB ), + Tightening( 5, -18, Tightening::LB ), + Tightening( 5, 6, Tightening::UB ) } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (BILINEAR): + -x2 - x3 - 1 <= x4 <= 3x2 - x3 + 3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x5 <= x5 <= x5 + + Layer 2: + Using x5 = -x4: + -x4 <= x4 <= -x4 + + Layer 1: + Using -x2 - x3 - 1 <= x4 <= 3x2 - x3 + 3: + -3x2 + x3 - 3 <= x5 <= x2 + x3 + 1 + + Layer 0: + Using x2 = x0 - 2x1, x3 = x0 + x1: + -2x0 + 7x1 - 3 <= x5 <= 2x0 - x1 + 1 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { -1, -1 } ), + Vector( { 3, -1 } ), + Vector( { -1 } ), + Vector( { 3 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { -1 } ), + Vector( { -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { -3, 1 } ), + Vector( { 1, 1 } ), + Vector( { -3 } ), + Vector( { 1 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -2, 7 } ), + Vector( { 2, -1 } ), + Vector( { -3 } ), + Vector( { 1 } ) ); + + // Non-fixed activation neurons: x4 (BILINEAR). 
+        compareNonfixedNeurons( nlr, Set( { NLR::NeuronIndex( 2, 0 ) } ) );
+    }
+
+    void test_bbps_selection_bilinear()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTBilinear( nlr, tableau );
+
+        tableau.setLowerBound( 0, 1 );
+        tableau.setUpperBound( 0, 2 );
+        tableau.setLowerBound( 1, -2 );
+        tableau.setUpperBound( 1, 1 );
+
+        // Invoke Parameterised DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) );
+
+        // Using branching point (x3, 0.4902) for x4 (BILINEAR).
+        compareBBPSBranchingPoints(
+            nlr,
+            NLR::NeuronIndex( 2, 0 ),
+            std::pair( { NLR::NeuronIndex( 1, 1 ), 0.49016 } ) );
+
+        /*
+          Coefficients for bilinear layer (lower branch, x2: [-1, 6], x3: [-1, 0.49016]):
+          Lower bound:
+             alpha_l = x3.lb = -1
+             beta = x2.lb = -1
+             gamma_l = -x2.lb x3.lb = --1 * -1 = -1
+
+          Upper bound:
+             alpha_u = x3.ub = 0.49016
+             beta = x2.lb = -1
+             gamma_u = -x2.lb x3.ub = --1 * 0.49016 = 0.49016
+
+          -x2 - x3 - 1 <= x4 <= 0.49016 x2 - x3 + 0.49016.
+          Concretizing x2: -x3 - 7 <= x4 <= -x3 + 3.4314.
+
+          Coefficients for bilinear layer (upper branch, x2: [-1, 6], x3: [0.49016, 3]):
+          Lower bound:
+             alpha_l = x3.lb = 0.49016
+             beta = x2.lb = -1
+             gamma_l = -x2.lb x3.lb = --1 * 0.49016 = 0.49016
+
+          Upper bound:
+             alpha_u = x3.ub = 3
+             beta = x2.lb = -1
+             gamma_u = -x2.lb x3.ub = --1 * 3 = 3
+
+          0.49016 x2 - x3 + 0.49016 <= x4 <= 3x2 - x3 + 3.
+          Concretizing x2: -x3 <= x4 <= -x3 + 21.
+
+          Lower branch symbolic bounds: -x3 - 7 <= x4 <= -x3 + 3.4314.
+          Upper branch symbolic bounds: -x3 <= x4 <= -x3 + 21.
+        */
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 2, 0 ),
+                                     Vector( { -1, -1 } ),
+                                     Vector( { -1, -1 } ),
+                                     Vector( { -7, 0 } ),
+                                     Vector( { 3.4314, 21 } ) );
+
+        /* Calculating the BBPS-based PMNR score of x4:
+           Symbolic bounds of output layer in terms of Layer 2: -x4 <= x5 <= -x4.
+
+           Lower branch, using x3: [-1, 0.49016], -x3 - 7 <= x4 <= -x3 + 3.4314:
+           Output symbolic bounds x3 - 3.4314 <= x5 <= x3 + 7.
+           Upper branch, using x3: [0.49016, 3], -x3 <= x4 <= -x3 + 21:
+           Output symbolic bounds x3 - 21 <= x5 <= x3.
+
+           Summing over all branches:
+           Lower symbolic expression: 2x3 - 24.4314 >= -26.4314.
+           Upper symbolic expression: 2x3 + 7 <= 13.
+
+           Final score = ( 13 - (-26.4314) ) / 2 = 19.7157.
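+
+           ( Concretizing over x3 in [-1, 3]: the summed lower expression 2x3 - 24.4314 attains
+             its minimum -26.4314 at x3 = -1, and the summed upper expression 2x3 + 7 attains its
+             maximum 13 at x3 = 3; the score averages this gap over the 2 branches. )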
+ */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 19.7157 ); + } + + void test_parameterised_symbolic_bound_maps_relus_all_active() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Both ReLUs active, bound survive through activations: + + x2 <= x4 <= x2 + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + => x2 - x3 <= x6 <= x2 - x3 + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ + + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (RELU): + x2 <= x4 <= x2 + x3 <= x5 <= x3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using x2 <= x4 <= x2, x3 <= x5 <= x3: + x2 - x3 <= x6 <= x2 - x3 + + Layer 0: + Using x2 = 2x0 + 3x1, x3 = x0 + x1: + x0 + 2x1 <= x6 <= x0 + 2x1 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 1, 1 } ), + Vector( { 1, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 1, 2 } ), + Vector( { 1, 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + + // Non-fixed activation neurons: None. 
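+        // ( Both ReLU inputs are strictly positive, x2 in [11, 27] and x3 in [5, 11], so both
+        // ReLUs are fixed in the active phase and their bounds pass through unchanged. )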
+ compareNonfixedNeurons( nlr, Set( {} ) ); + } + + void test_parameterised_symbolic_bound_maps_relus_active_and_inactive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 - 30 + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive, bounds get zeroed + Second ReLU is active, bounds surive the activation + + 0 <= x4 <= 0 + x4.lb = 0 + x4.ub = 0 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + ==> -x3 <= x6 <= -x3 + x6.lb = -x0 - x1 : [-11, -5] + x6.ub = -x0 - x1 : [-11, -5] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (RELU): + 0 <= x4 <= 0 + x3 <= x5 <= x3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using x2 <= x4 <= x2, x3 <= x5 <= x3: + -x3 <= x6 <= -x3 + + Layer 0: + Using x3 = x0 + x1: + -x0 - x1 <= x6 <= -x0 - x1 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, -1 } ), + Vector( { 0, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -1, -1 } ), + Vector( { -1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + + // Non-fixed activation neurons: None. 
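+        // ( x2 in [-19, -3] is strictly negative, so x4's ReLU is fixed inactive; x3 in [5, 11]
+        // is strictly positive, so x5's ReLU is fixed active. )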
+ compareNonfixedNeurons( nlr, Set( {} ) ); + } + + void test_parameterised_symbolic_bound_maps_relus_active_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 - 15 + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is undecided, bound is concretized. Using custom ReLU lower + coefficient of 0.5. Upper coefficient: 12/(12--4) = 12/16 = 0.75 + Second ReLU is active, bounds surive the activation + + x4 range: [-2, 12] + 0.5 x2 <= x4 <= 0.75 x2 + 3 + x4.lb = 0.5 ( 2x0 + 3x1 - 15 ) = x0 + 1.5 x1 - 7.5 + x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + ==> 0.5 x2 - x3 <= x6 <= 0.75x2 - x3 + 3 + x6.lb = 0.5 x1 - 7.5 + x6.ub = 0.5x0 + 1.25x1 - 8.25 + + x6 range: [0.5 - 7.5 = -7, 3 + 6.25 - 8.25 = 1] = [-7, 1] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -2, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -7, Tightening::LB ), + Tightening( 6, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (RELU): + 0.5 x2 <= x4 <= 0.75 x2 + 3 + x3 <= x5 <= x3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using x2 <= x4 <= x2, x3 <= x5 <= x3: + 0.5 x2 - x3 <= x6 <= 0.75x2 - x3 + 3 + + Layer 0: + Using x2 = 2x0 + 3x1, x3 = x0 + x1: + 0.5 x1 - 7.5 <= x6 <= 0.5x0 + 1.25x1 - 8.25 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0.5, 1 } ), + Vector( { 0.75, 1 } ), + Vector( { 0, 0 } ), + Vector( { 3, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0.5, -1 } ), + Vector( { 0.75, -1 } ), + Vector( { 0 } ), + Vector( { 3 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0, 0.5 } ), + Vector( { 0.5, 1.25 } ), + Vector( { -7.5 } ), + Vector( { -8.25 } ) ); + + // Non-fixed activation neurons: x4 (RELU). 
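+        // ( x2 in [-4, 12] straddles zero, so x4's ReLU is the only non-fixed activation;
+        // x5's ReLU is fixed active since x3 in [5, 11] is strictly positive. )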
+ compareNonfixedNeurons( nlr, Set( { NLR::NeuronIndex( 2, 0 ) } ) ); + } + + void test_parameterised_symbolic_bound_maps_relus_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. + nlr.setBias( 1, 0, -15 ); + + // However, one of the ReLU's variables has been eliminated + nlr.eliminateVariable( 2, -3 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = -3 + x2 is eliminated, everything set to -3 + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive (set externally), bounds get zeroed + Second ReLU is active, bounds surive the activation + + 0 <= x4 <= 0 + x4.lb = 0 + x4.ub = 0 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + ==> -x3 <= x6 <= -x3 + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (RELU): + 0 <= x4 <= 0 + x3 <= x5 <= x3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using x2 <= x4 <= x2, x3 <= x5 <= x3: + -x3 <= x6 <= -x3 + + Layer 0: + Using x3 = x0 + x1: + -x0 - x1 <= x6 <= -x0 - x1 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, -1 } ), + Vector( { 0, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -1, -1 } ), + Vector( { -1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + + // Non-fixed activation neurons: None. 
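+        // ( x2 was eliminated and fixed to -3, so x4's ReLU is treated as inactive despite the
+        // negative bias that would otherwise leave it unfixed. )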
+ compareNonfixedNeurons( nlr, Set( {} ) ); + } + + void test_parameterised_symbolic_bound_maps_relu_residual1() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluResidual1( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); + + /* + Input ranges: + + x0: [-1, 1] + + Layers 1. 2: + + x1 = x0 + x1.lb = x0 : [-1, 1] + x1.ub = x0 : [-1, 1] + + ReLU is undecided, bound is concretized. Using custom ReLU lower + coefficient of 0.5. Upper coefficient: 1/( 1--1 ) = 1/2 = 0.5 + + 0.5 x1 <= x2 <= 0.5x1 + 0.5 + x2.lb = 0.5 x0 + x2.ub = 0.5 x0 + 0.5 + x2 range: [-0.5, 1] + + Layers 3, 4 (with residual from x0): + + x3 = - x2 - x0 + 1 + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5 x0 + 0.5 : [-1, 2] + x3.ub = -1( 0.5 x0 ) -1x0 + 1 = -1.5 x0 + 1 : [-0.5, 2.5] + x3 range: [-1, 2.5] + + ReLU is undecided, bound is concretized. Using custom ReLU lower + coefficient of 0.5. Upper coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. + + 0.5 x3 <= x4 <= 5/7 x3 + 5/7 + x4.lb = 0.5 ( -1.5 x0 + 0.5 ) = -0.75 x0 + 0.25 : [-0.5, 1] + x4.ub = 5/7 ( -1.5 x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [1, 35/14 = 2.5] + x4 range: [-0.5, 2.5] + + Layer 5 (with residual from x1): + + x5 = 3x4 + 3x1 + 1 + x5.lb = 3 ( -0.75 x0 + 0.25 ) + 3 ( x0 ) + 1 = 0.75x0 + 1.75 : [1, 2.5] + x5.ub = 3 ( -15/14 x0 + 20/14 ) + 3 ( x0 ) + 1 = -3/14 x0 + 74/14 : [71/14, 77/14 = 5.5] + x5 range: [1, 5.5] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, -0.5, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2.5, Tightening::UB ), + Tightening( 4, -0.5, Tightening::LB ), + Tightening( 4, 2.5, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 5.5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (RELU): + 0.5 x1 <= x2 <= 0.5x1 + 0.5 + + Layer 4 (RELU): + 0.5 x3 <= x4 <= 5/7 x3 + 5/7 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 5: + x5 <= x5 <= x5 + + Layer 4: + Using x5 = 3x4 + 3x1 + 1: + 3x4 + 3x1 + 1 <= x5 <= 3x4 + 3x1 + 1 + Concretizing residual using x1 : [-1, 1]: 3x4 - 2 <= x5 <= 3x4 + 4 + + Layer 3: + Using 0.5 x3 <= x4 <= 5/7 x3 + 5/7: + 1.5 x3 + 3x1 + 1 <= x5 <= 15/7 x3 + 3x1 + 22/7 + Concretizing residual using x1 : [-1, 1]: 1.5 x3 - 2 <= x5 <= 15/7 x3 + 43/7 + + Layer 2: + Using x3 = -x2 - x0 + 1: + -1.5 x2 + 3x1 - 1.5 x0 + 2.5 <= x5 <= -15/7 x2 + 3x1 - 15/7 x0 + 37/7 + Concretizing residual using x0 : [-1, 1], x1 : [-1, 1]: -1.5 x2 - 2 <= x5 <= -15/7 x2 + + 73/7 + + Layer 1: + Using 0.5 x1 <= x2 <= 0.5x1 + 0.5: + 2.25 x1 - 1.5 x0 + 1.75 <= x5 <= 27/14 x1 - 15/7 x0 + 37/7 + Concretizing residual using x0 : [-1, 1]: 2.25x1 + 0.25 <= x5 <= 27/14 x1 + 52/7 + + Layer 0: + Using x1 = x0: + 0.75 x0 + 1.75 <= x5 <= -3/14 x0 + 37/7 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0.5 } ), + Vector( { 0.5 } ), + 
Vector( { 0 } ), + Vector( { 0.5 } ) ); + comparePredecessorSymbolicBounds( nlr, + 4, + Vector( { 0.5 } ), + Vector( { 0.7143 } ), + Vector( { 0 } ), + Vector( { 0.7143 } ) ); + + compareOutputSymbolicBounds( nlr, + 5, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 3 } ), + Vector( { 3 } ), + Vector( { -2 } ), + Vector( { 4 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1.5 } ), + Vector( { 2.1429 } ), + Vector( { -2 } ), + Vector( { 6.1429 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { -1.5 } ), + Vector( { -2.1429 } ), + Vector( { -2 } ), + Vector( { 10.4286 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 2.25 } ), + Vector( { 1.9286 } ), + Vector( { 0.25 } ), + Vector( { 7.4286 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0.75 } ), + Vector( { -0.2143 } ), + Vector( { 1.75 } ), + Vector( { 5.2857 } ) ); + + // Non-fixed activation neurons: x2 (RELU), x4 (RELU). + compareNonfixedNeurons( + nlr, Set( { NLR::NeuronIndex( 2, 0 ), NLR::NeuronIndex( 4, 0 ) } ) ); + } + + void test_parameterised_symbolic_bound_maps_relu_residual2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluResidual2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); + + /* + Input ranges: + + x0: [-1, 1] + + Layers 1, 2: + + x1 = x0 + x1.lb = x0 : [-1, 1] + x1.ub = x0 : [-1, 1] + + ReLU is undecided, bound is concretized. Using custom ReLU lower + coefficient of 0.5. Upper cCoefficient: 1/( 1--1 ) = 1/2 = 0.5 + + 0.5 x1 <= x2 <= 0.5x1 + 0.5 + x2.lb = 0.5x0 + x2.ub = 0.5x0 + 0.5 + x2 range: [-0.5, 1] + + Layers 3, 4 (with residual from x0): + + x3 = - x2 - x0 + 1 + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 2] + x3.ub = -1( 0.5 x0 ) -1x0 + 1 = -1.5 x0 + 1 : [-0.5, 2.5] + x3 range: [-1, 2.5] + + ReLU is undecided, bound is concretized. Using custom ReLU lower + coefficient of 0.5. Upper coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. 
+ + 0.5 x3 <= x4 <= 5/7 x3 + 5/7 + x4.lb = 0.5 ( -1.5 x0 + 0.5 ) = -0.75 x0 + 0.25 : [-0.5, 1] + x4.ub = 5/7 ( -1.5 x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [1, 35/14 = 2.5] + x4 range: [-0.5, 2.5] + + Layer 5 (with residual from x0): + + x5 = 3x4 + x0 + 1 + x5.lb = 3 ( -0.75 x0 + 0.25 ) + ( x0 ) + 1 = -1.25x0 + 1.75 : [0.5, 3] + x5.ub = 3 ( -15/14 x0 + 20/14 ) + ( x0 ) + 1 = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5] + x5 range: [0.5, 7.5] + + Layer 6: + x6 = x5 + x6.lb = -1.25x0 + 1.75 : [0.5, 3] + x6.ub = -31/14 x0 + 74/14 : [43/14, 7.5] + x6 range: [0.5, 7.5] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, -0.5, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2.5, Tightening::UB ), + Tightening( 4, -0.5, Tightening::LB ), + Tightening( 4, 2.5, Tightening::UB ), + Tightening( 5, 0.5, Tightening::LB ), + Tightening( 5, 7.5, Tightening::UB ), + Tightening( 6, 0.5, Tightening::LB ), + Tightening( 6, 7.5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (RELU): + 0.5 x1 <= x2 <= 0.5x1 + 0.5 + + Layer 4 (RELU): + 0.5 x3 <= x4 <= 5/7 x3 + 5/7 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 6: + x6 <= x6 <= x6 + + Layer 5: + Using x6 = x5: + x5 <= x6 <= x5 + + Layer 4: + Using x5 = 3x4 + x0 + 1: + 3x4 + x0 + 1 <= x6 <= 3x4 + x0 + 1 + Concretizing residual using x0 : [-1, 1]: 3x4 <= x6 <= 3x4 + 2 + + Layer 3: + Using 0.5 x3 <= x4 <= 5/7 x3 + 5/7: + 1.5 x3 + x0 + 1 <= x6 <= 15/7 x3 + x0 + 22/7 + Concretizing residual using x0 : [-1, 1]: 1.5 x3 <= x6 <= 15/7 x3 + 29/7 + + Layer 2: + Using x3 = -x2 - x0 + 1: + -1.5 x2 - 0.5 x0 + 2.5 <= x6 <= -15/7 x2 - 8/7 x0 + 37/7 + Concretizing residual using x0 : [-1, 1]: -1.5 x2 + 2 <= x6 <= -15/7 x2 + 45/7 + + Layer 1: + Using 0.5 x1 <= x2 <= 0.5x1 + 0.5: + -0.75x1 - 0.5 x0 + 1.75 <= x6 <= -15/14 x1 - 8/7 x0 + 37/7 + Concretizing residual using x0 : [-1, 1]: -0.75x1 + 1.25 <= x6 <= -15/14 x1 + 45/7 + + Layer 0: + Using x1 = x0: + -1.25 x0 + 1.75 <= x6 <= -31/14 x0 + 37/7 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0.5 } ), + Vector( { 0.5 } ), + Vector( { 0 } ), + Vector( { 0.5 } ) ); + comparePredecessorSymbolicBounds( nlr, + 4, + Vector( { 0.5 } ), + Vector( { 0.7143 } ), + Vector( { 0 } ), + Vector( { 0.7143 } ) ); + + compareOutputSymbolicBounds( nlr, + 6, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 5, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 3 } ), + Vector( { 3 } ), + Vector( { 0 } ), + Vector( { 2 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1.5 } ), + Vector( { 2.1429 } ), + Vector( { 0 } ), + Vector( { 4.1429 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { -1.5 } ), + Vector( { -2.1429 } ), + Vector( { 2 } ), + Vector( { 6.4286 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { -0.75 } ), + Vector( { -1.0714 } ), + Vector( { 1.25 } ), + Vector( { 6.4286 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -1.25 } ), + Vector( { -2.2143 } ), + Vector( { 1.75 } ), + Vector( { 5.2857 } ) ); + + // Non-fixed activation neurons: x2 (RELU), x4 (RELU). 
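+        // ( Both ReLU inputs straddle zero, x1 in [-1, 1] and x3 in [-1, 2.5], so x2 and x4
+        // remain non-fixed and use the parameterised relaxation with lower coefficient 0.5. )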
+ compareNonfixedNeurons( + nlr, Set( { NLR::NeuronIndex( 2, 0 ), NLR::NeuronIndex( 4, 0 ) } ) ); + } + + void test_parameterised_symbolic_bound_maps_relu_reindex() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluReindex( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); + + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 1] + + Layers 1, 2: + + x2 = x0 + x1 + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3 = x0 - x1 + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] + + Both ReLUs are undecided, bounds are concretized. Using custom ReLU lower + coefficient of 0.5. Upper coefficient: 2/( 2--2 ) = 2/4 = 0.5 + + 0.5 x2 <= x4 <= 0.5x2 + 1 + x4.lb = 0.5 ( x0 + x1 ) = 0.5x0 + 0.5x1 + x4.ub = 0.5 ( x0 + x1 ) + 1 = 0.5x0 + 0.5x1 + 1 + x4 range: [-1, 2] + + 0.5 x3 <= x5 <= 0.5x3 + 1 + x5.lb = 0.5 ( x0 - x1 ) = 0.5x0 - 0.5x1 + x5.ub = 0.5 ( x0 - x1 ) + 1 = 0.5x0 - 0.5x1 + 1 + x5 range: [-1, 2] + + Layers 3, 4: + + x6 = x4 + x5 + x6.lb = 1 ( 0.5x0 + 0.5x1 ) + 1 ( 0.5x0 - 0.5x1 ) = x0 : [-1, 1] + x6.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) + 1 ( 0.5x0 - 0.5x1 + 1 ) = x0 + 2 : [1, 3] + x6 range: [-1, 3] + + x7 = x4 - x5 + x7.lb = 1 ( 0.5x0 + 0.5x1 ) - 1 ( 0.5x0 - 0.5x1 + 1 ) = x1 - 1 : [-2, 0] + x7.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) - 1 ( 0.5x0 - 0.5x1 ) = x1 + 1 : [0, 2] + x7 range: [-2, 2] + + Both ReLUs are undecided, bounds are concretized. Using custom ReLU lower + coefficient of 0.5. 
+ Upper coefficient (first ReLU): 3/( 3--1 ) = 3/4 = 0.75 + Upper coefficient (second ReLU): 2/( 2--2 ) = 2/4 = 0.5 + + 0.5 x6 <= x8 <= 0.75 x6 + 0.75 + x8.lb = 0.5 ( x0 ) = 0.5 x0 + x8.ub = 0.75 ( x0 + 2 ) + 0.75 = 0.75 x0 + 2.25 + x8 range: [-0.5, 3] + + 0.5 x7 <= x9 <= 0.5 x7 + 1 + x9.lb = 0.5 ( x1 - 1 ) = 0.5 x1 - 0.5 + x9.ub = 0.5 ( x1 + 1 ) + 1 = 0.5x1 + 1.5 + x9 range: [-1, 2] + + Layer 5: + x10 = x8 + x9 + 1 + x10.lb = 1 ( 0.5 x6 ) + 1 ( 0.5 x7 ) + 1 = ( 0.5 x4 + 0.5x5 ) + 1 ( 0.5 x4 - 0.5x5 ) + 1 + = x4 + 1 >= 0.5 x2 + 1 = 0.5 x0 + 0.5x1 + 1 : [0, 2] + x10.ub = 1 ( 0.75 x6 + 0.75 ) + 1 ( 0.5 x7 + 1 ) + 1 + = ( 0.75 x4 + 0.75 x5 + 0.75 ) + 1 ( 0.5 x4 - 0.5x5 + 1 ) + 1 + = 1.25 x4 + 0.25 x5 + 2.75 <= 0.625 x4 + 0.125 x5 + 4.25 + = 0.75 x0 + 0.5 x1 + 4.25 : [2.5, 5.5] + x10 range: [0, 5.5] + + x11 = x9 + x11.lb = 0.5 x1 - 0.5 : [-1, 0] + x11.ub = 0.5x1 + 1.5 : [1, 2] + x11 range: [-1, 2] + + */ + + List expectedBounds( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 5.5, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 2, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (RELU): + 0.5 x2 <= x4 <= 0.5x2 + 1 + 0.5 x3 <= x5 <= 0.5x3 + 1 + + Layer 4 (RELU): + 0.5 x6 <= x8 <= 0.75 x6 + 0.75 + 0.5 x7 <= x9 <= 0.5 x7 + 1 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 5: + x10 <= x10 <= x10 + x11 <= x11 <= x11 + + Layer 4: + Using x10 = x8 + x9 + 1, x11 = x9: + x8 + x9 + 1 <= x10 <= x8 + x9 + 1 + x9 <= x11 <= x9 + + Layer 3: + Using 0.5 x6 <= x8 <= x6, 0.5 x7 <= x9 <= 0.5 x7 + 1: + 0.5 x6 + 0.5 x7 + 1 <= x10 <= 0.75 x6 + 0.5 x7 + 2.75 + 0.5 x7 <= x11 <= 0.5 x7 + 1 + + Layer 2: + Using x6 = x4 + x5, x7 = x4 - x5: + x4 + 1 <= x10 <= 1.25 x4 + 0.25 x5 + 2.75 + 0.5 x4 - 0.5 x5 <= x11 <= 0.5 x4 - 0.5 x5 + 1 + + Layer 1: + Using 0.5 x2 <= x4 <= 0.5x2 + 1, 0.5 x3 <= x5 <= 0.5x3 + 1: + 0.5 x2 + 1 <= x10 <= 0.625 x2 + 0.125 x3 + 4.25 + 0.25 x2 - 0.25 x3 - 0.5 <= x11 <= 0.25 x2 - 0.25 x3 + 1.5 + + Layer 0: + Using x2 = x0 + x1, x3 = x0 - x1: + 0.5 x0 + 0.5 x1 + 1 <= x10 <= 0.75 x0 + 0.5 x1 + 4.25 + 0.5 x1 - 0.5 <= x11 <= 0.5 x1 + 1.5 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0.5, 0.5 } ), + Vector( { 0.5, 0.5 } ), + Vector( { 0, 0 } ), + Vector( { 1, 1 } ) ); + + comparePredecessorSymbolicBounds( nlr, + 4, + Vector( { 0.5, 0.5 } ), + Vector( { 0.5, 0.75 } ), + Vector( { 0, 0 } ), + Vector( { 1, 0.75 } ) ); + + compareOutputSymbolicBounds( nlr, + 5, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 1, 1, 1, 0 } ), + Vector( { 1, 1, 1, 0 } ), + Vector( { 1, 0 } ), + Vector( { 1, 0 } ) ); + 
compareOutputSymbolicBounds( nlr, + 3, + Vector( { 0.5, 0, 0.5, 0.5 } ), + Vector( { 0.75, 0, 0.5, 0.5 } ), + Vector( { 1, 0 } ), + Vector( { 2.75, 1 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 0, -0.5, 1, 0.5 } ), + Vector( { 0.25, -0.5, 1.25, 0.5 } ), + Vector( { 1, 0 } ), + Vector( { 2.75, 1 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0.5, 0.25, 0, -0.25 } ), + Vector( { 0.625, 0.25, 0.125, -0.25 } ), + Vector( { 1, -0.5 } ), + Vector( { 4.25, 1.5 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0.5, 0, 0.5, 0.5 } ), + Vector( { 0.75, 0, 0.5, 0.5 } ), + Vector( { 1, -0.5 } ), + Vector( { 4.25, 1.5 } ) ); + + // Non-fixed activation neurons: x4 (RELU), x5 (RELU), x8 (RELU), x9 (RELU). + compareNonfixedNeurons( nlr, + Set( { NLR::NeuronIndex( 2, 0 ), + NLR::NeuronIndex( 2, 1 ), + NLR::NeuronIndex( 4, 0 ), + NLR::NeuronIndex( 4, 1 ) } ) ); + } + + void test_parameterised_symbolic_bound_maps_absolute_values_all_positive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTAbsoluteValue( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Both absolute values positive, bound survive through activations: + + x2 <= x4 <= x2 + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + x5 = x4 - x5 + => x2 - x3 <= x5 <= x2 - x3 + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ + + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (ABSOLUTE_VALUE): + x2 <= x4 <= x2 + x3 <= x5 <= x3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using x2 <= x4 <= x2, x3 <= x5 <= x3: + x2 - x3 <= x6 <= x2 - x3 + + Layer 0: + Using x2 = 2x0 + 3x1, x3 = x0 + x1: + x0 + 2x1 <= x6 <= x0 + 2x1 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 1, 1 } ), + Vector( { 1, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + 
Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 1, 2 } ), + Vector( { 1, 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + + // Non-fixed activation neurons: None. + compareNonfixedNeurons( nlr, Set( {} ) ); + } + + void test_parameterised_symbolic_bound_maps_absolute_values_positive_and_negative() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTAbsoluteValue( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + x2 = 2x0 + 3x1 - 30 + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is negative, bounds get flipped + Second absolute value is positive, bounds surive the activation + + -x2 <= x4 <= -x2 + x4.lb = -2x0 -3x1 + 30 : [3, 19] + x4.ub = -2x0 -3x1 + 30 : [3, 19] + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + x5 = x4 - x5 + => -x2 - x3 <= x5 <= -x2 - x3 + x6.lb = - 3x0 - 4x1 + 30 : [-8, 14] + x6.ub = - 3x0 - 4x1 + 30 : [-8, 14] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 19, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 14, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (ABSOLUTE_VALUE): + -x2 <= x4 <= -x2 + x3 <= x5 <= x3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using -x2 <= x4 <= -x2, x3 <= x5 <= x3: + -x2 - x3 <= x6 <= -x2 - x3 + + Layer 0: + Using x2 = 2x0 + 3x1 - 30, x3 = x0 + x1: + -3x0 - 4x1 + 30 <= x6 <= -3x0 - 4x1 + 30 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { -1, 1 } ), + Vector( { -1, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { -1, -1 } ), + Vector( { -1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -3, -4 } ), + Vector( { -3, -4 } ), + Vector( { 30 } ), + Vector( { 30 } ) ); + + 
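+        // ( x2 in [-19, -3] is strictly negative, so the first absolute value is fixed in its
+        // negative phase and its bounds are flipped; x3 in [5, 11] keeps the second one positive. )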
// Non-fixed activation neurons: None. + compareNonfixedNeurons( nlr, Set( {} ) ); + } + + void test_parameterised_symbolic_bound_maps_absolute_values_positive_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTAbsoluteValue( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + x2 = 2x0 + 3x1 - 15 + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is undecided, bounds are concretized. + Second absolute value is active, bounds surive the activation + + 0 <= x4 <= 12 + x4 range: [0, 12] + x4.lb = 0 + x4.ub = 12 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + => -x3 <= x6 <= -x3 + 12 + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 + 12 : [ 1, 7] + + x6 range: [-11, 7] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, 7, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (ABSOLUTE_VALUE): + 0 <= x4 <= 12 + x3 <= x5 <= x3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using 0 <= x4 <= 12, x3 <= x5 <= x3: + -x3 <= x6 <= -x3 + 12 + + Layer 0: + Using x3 = x0 + x1: + -x0 - x1 <= x6 <= -x0 - x1 + 12 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 12, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, -1 } ), + Vector( { 0, -1 } ), + Vector( { 0 } ), + Vector( { 12 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -1, -1 } ), + Vector( { -1, -1 } ), + Vector( { 0 } ), + Vector( { 12 } ) ); + + // Non-fixed activation neurons: x4 (ABSOLUTE_VALUE). 
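+        // ( x2 in [-4, 12] straddles zero, so the first absolute value is the only non-fixed
+        // activation; its bounds are concretized to [0, 12]. )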
+ compareNonfixedNeurons( nlr, Set( { NLR::NeuronIndex( 2, 0 ) } ) ); + } + + void test_parameterised_symbolic_bound_maps_absolute_values_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTAbsoluteValue( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. + nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = -3 + x2 is eliminated, everything set to -3 + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is negative, bounds get flipped + Second absolute value is positive, bounds surive the activation + + -x2 <= x4 <= -x2 + x4: all set to 3 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + => -x2 - x3 <= x6 <= -x2 - x3 + => -x3 + 3 <= x6 <= -x3 + 3 + x6.lb = - x0 - x1 + 3 : [-8, -2] + x6.ub = - x0 - x1 + 3 : [-8, -2] + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (ABSOLUTE_VALUE): + -x2 <= x4 <= -x2 + x3 <= x5 <= x3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using -x2 <= x4 <= -x2, x3 <= x5 <= x3: + -x2 - x3 <= x6 <= -x2 - x3 + x2 = -3 is eliminated. + -x3 + 3 <= x6 <= -x3 + 3 + + Layer 0: + Using x3 = x0 + x1: + - x0 - x1 + 3 <= x6 <= - x0 - x1 + 3 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { -1, 1 } ), + Vector( { -1, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, -1 } ), + Vector( { 0, -1 } ), + Vector( { 3 } ), + Vector( { 3 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -1, -1 } ), + Vector( { -1, -1 } ), + Vector( { 3 } ), + Vector( { 3 } ) ); + + // Non-fixed activation neurons: None. 
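+        // ( x2 was eliminated and fixed to -3, so the first absolute value is fixed in its
+        // negative phase and x4 is constant 3. )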
+ compareNonfixedNeurons( nlr, Set( {} ) ); + } + + void test_parameterised_symbolic_bound_maps_signs_positive_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSign( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 - 15 + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First sign is undecided, bounds are concretized. + Second sign is active, bounds become constant 1 + Using custom coefficients with alpha = { 0.5, 0.5 }. + Coefficient (first Sign, lower): 2/12 * 0.5 = 1/12. + Coefficient (first Sign, upper): -2/-4 * 0.5 = 1/4. + + 1/12 x2 - 1 <= x4 <= 1/4 x2 + 1 + x4.lb = 1/12 ( 2x0 + 3x1 - 15 ) - 1 = 2/12 x0 + 3/12 x1 - 27/12 + x4.ub = 1/4 ( 2x0 + 3x1 - 15 ) + 1 = 0.5 x0 + 0.75x1 - 2.75 + x4 range: [-1, 1] + + 1 <= x5 <= 1 + x5.lb = 1 + x5.ub = 1 + x5 range: [1, 1] + + Layer 3: + + x6 = x4 - x5 : [-2, 0] + => 1/12 x2 - 2 <= x6 <= 1/4 x2 : [-8/3, 6] + x6.lb = 1 ( 2/12 x0 + 3/12 x1 - 27/12 ) - 1 ( 1 ) = 2/12 x0 + 3/12 x1 - 39/12 : + [-28/12 = -7/3, -1] + x6.ub = 1 ( 0.5 x0 + 0.75x1 - 2.75 ) - 1 ( 1 ) = 0.5 x0 + 0.75x1 - 3.75 : [-1, 3] + + x6 range: [-2, 0] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), + Tightening( 6, 0, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (SIGN): + 1/12 x2 - 1 <= x4 <= 1/4 x2 + 1 + 1 <= x5 <= 1 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using 1/12 x2 - 1 <= x4 <= 1/4 x2 + 1, 1 <= x5 <= 1: + 1/12 x2 - 2 <= x6 <= 1/4 x2 + + Layer 0: + Using x2 = 2x0 + 3x1 - 15: + 1/6 x0 + 1/4 x1 - 3.25 <= x6 <= 0.5 x0 + 0.75x1 - 3.75 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0.0833, 0 } ), + Vector( { 0.25, 0 } ), + Vector( { -1, 1 } ), + Vector( { 1, 1 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0.0833, 0 } ), + Vector( { 0.25, 0 } ), + Vector( { -2 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0.1667, 0.25 } ), + Vector( 
{ 0.5, 0.75 } ), + Vector( { -3.25 } ), + Vector( { -3.75 } ) ); + + // Non-fixed activation neurons: x4 (SIGN). + compareNonfixedNeurons( nlr, Set( { NLR::NeuronIndex( 2, 0 ) } ) ); + } + + void test_parameterised_symbolic_bound_maps_signs_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSign( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. + nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = -3 + x2 is eliminated, everything set to -3 + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First sign is negative, bounds become constant -1 + Second sign is positive, bounds become constant 1 + + -1 <= x4 <= 1 + x4: all set to -1 + + 1 <= x5 <= 1 + x5: all set to 1 + + Layer 3: + + x6 = x5 - x4 + x6.lb = 1 ( -1 ) - 1 ( 1 ) = -2 + x6.ub = 1 ( -1 ) - 1 ( 1 ) = -2 + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, -1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (SIGN): + -1 <= x4 <= -1 + 1 <= x5 <= 1 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using -1 <= x4 <= -1, 1 <= x5 <= 1: + -2 <= x6 <= -2 + + Layer 0: + -2 <= x6 <= -2 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0, 0 } ), + Vector( { 0, 0 } ), + Vector( { -1, 1 } ), + Vector( { -1, 1 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, 0 } ), + Vector( { 0, 0 } ), + Vector( { -2 } ), + Vector( { -2 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0, 0 } ), + Vector( { 0, 0 } ), + Vector( { -2 } ), + Vector( { -2 } ) ); + + // Non-fixed activation neurons: None. 
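+        // ( x2 was eliminated and fixed to -3, so the first sign is constant -1; x3 in [5, 11]
+        // keeps the second sign constant 1. )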
+ compareNonfixedNeurons( nlr, Set( {} ) ); + } + + void test_parameterised_symbolic_bound_maps_leaky_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTLeakyReLU( nlr, tableau ); // alpha = 0.2 + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); + + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 1] + + Layer 1: + x2 = x0 + x1 + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3 = x0 - x1 + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] + + Both LeakyReLUs are undecided, bounds are concretized. Using custom lower coefficient with + alpha = { 0.5 }. + Lower Coefficient: ( 1 - 0.2 ) * 0.5 + 0.2 = 0.6 + Lower Bias: 0 + Upper Coefficient: ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6 + Upper Bias: ( 0.2 - 1 ) * 2 * -2 /( 2--2 ) = 0.8 + + 0.6 x2 <= x4 <= 0.6 x2 + 0.8 + x4.lb = 0.6 ( x0 + x1 ) = 0.6 x0 + 0.6x1 + x4.ub = 0.6 ( x0 + x1 ) + 0.8 = 0.6 x0 + 0.6 x1 + 0.8 + x4 range: [-1.2, 2] + + 0.6 x3 <= x5 <= 0.6 x3 + 0.8 + x5.lb = 0.6 ( x0 - x1 ) = 0.6 x0 - 0.6 x1 + x5.ub = 0.6 ( x0 - x1 ) + 0.8 = 0.6 x0 - 0.6 x1 + 0.8 + x5 range: [-1.2, 2] + + Layer 2: + + x6 = x4 + x5 + x6.lb = 1 ( 0.6x0 + 0.6x1 ) + 1 ( 0.6x0 - 0.6x1 ) = 1.2 x0 : [-1.2, 1.2] + x6.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) + 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2 x0 + 1.6 : + [0.4, 2.8] x6 range: [-1.2, 2.8] + + x7 = x4 - x5 + x7.lb = 1 ( 0.6x0 + 0.6x1 ) - 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2 x1 - 0.8 : [-2, 0.4] + x7.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) - 1 ( 0.6x0 - 0.6x1 ) = 1.2 x1 + 0.8 : [-0.4, 2] + x7 range: [-2, 2] + + Both LeakyReLUs are undecided, bounds are concretized. Using custom lower coefficient with + alpha = { 0.5 }. 
+ Lower Coefficient (first LeakyReLU): ( 1 - 0.2 ) * 0.5 + 0.2 = 0.6
+ Lower Bias (first LeakyReLU): 0
+ Upper Coefficient (first LeakyReLU): ( 2.8 - 0.2*-1.2 )/( 2.8--1.2 ) = 3.04/4 = 0.76
+ Upper Bias (first LeakyReLU): ( 0.2 - 1 ) * 2.8 * -1.2 / ( 2.8--1.2 ) = 0.672
+
+ Lower Coefficient (second LeakyReLU): ( 1 - 0.2 ) * 0.5 + 0.2 = 0.6
+ Lower Bias (second LeakyReLU): 0
+ Upper Coefficient (second LeakyReLU): ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6
+ Upper Bias (second LeakyReLU): ( 0.2 - 1 ) * 2 * -2 / ( 2--2 ) = 0.8
+
+ 0.6 x6 <= x8 <= 0.76 x6 + 0.672
+ x8.lb = 0.6 ( 1.2x0 ) = 0.72 x0
+ x8.ub = 0.76 ( 1.2x0 + 1.6 ) + 0.672 = 0.912 x0 + 1.888
+ x8 range: [-0.72, 2.8]
+
+ 0.6 x7 <= x9 <= 0.6 x7 + 0.8
+ x9.lb = 0.6 ( 1.2x1 - 0.8 ) = 0.72 x1 - 0.48
+ x9.ub = 0.6 ( 1.2x1 + 0.8 ) + 0.8 = 0.72 x1 + 1.28
+ x9 range: [-1.2, 2]
+
+ Layer 3:
+
+ x10 = x8 + x9 + 1
+ x10.lb = 0.6 x6 + 0.6 x7 + 1 >= 0.6 ( x4 + x5 ) + 0.6 ( x4 - x5 ) + 1 =
+ 1.2 x4 + 1 >= 1.2 ( 0.6 x2 ) + 1 = 0.72 x2 + 1
+ = 0.72 x0 + 0.72 x1 + 1 : [-0.44, 2.44]
+ x10.ub = ( 0.76 x6 + 0.672 ) + ( 0.6 x7 + 0.8 ) + 1 = 0.76 x6 + 0.6 x7 + 2.472
+ >= 0.76 ( x4 + x5 ) + 0.6 ( x4 - x5 ) + 2.472 = 1.36 x4 + 0.16 x5 + 2.472
+ >= 1.36 ( 0.6 x2 + 0.8 ) + 0.16 ( 0.6 x3 + 0.8 ) + 2.472
+ = 0.816 x2 + 0.096 x3 + 3.688 = 0.912 x0 + 0.72 x1 + 3.688 : [2.056, 5.32]
+ x10 range: [-0.44, 5.32]
+
+ x11.lb = 0.72 x1 - 0.48 : [-1.2, 0.24]
+ x11.ub = 0.72 x1 + 1.28 : [0.56, 2]
+ x11 range: [-1.2, 2]
+
+ */
+
+ List expectedBounds(
+ { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+ Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+
+ Tightening( 4, -1.2, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
+ Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+
+ Tightening( 6, -1.2, Tightening::LB ), Tightening( 6, 2.8, Tightening::UB ),
+ Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
+
+ Tightening( 8, -0.72, Tightening::LB ), Tightening( 8, 2.8, Tightening::UB ),
+ Tightening( 9, -1.2, Tightening::LB ), Tightening( 9, 2, Tightening::UB ),
+
+ Tightening( 10, -0.44, Tightening::LB ), Tightening( 10, 5.32, Tightening::UB ),
+ Tightening( 11, -1.2, Tightening::LB ), Tightening( 11, 2, Tightening::UB )
+
+ } );
+
+ List bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+ /*
+ Symbolic bounds of every activation layer in terms of predecessor:
+
+ Layer 2 (LEAKY_RELU):
+ 0.6 x2 <= x4 <= 0.6 x2 + 0.8
+ 0.6 x3 <= x5 <= 0.6 x3 + 0.8
+
+ Layer 4 (LEAKY_RELU):
+ 0.6 x6 <= x8 <= 0.76 x6 + 0.672
+ 0.6 x7 <= x9 <= 0.6 x7 + 0.8
+
+ Symbolic bounds of output layer in terms of every layer (backsubstitution):
+
+ Layer 5:
+ x10 <= x10 <= x10
+ x11 <= x11 <= x11
+
+ x10 = x8 + x9 + 1
+ x10.lb = 0.6 x6 + 0.6 x7 + 1 >= 0.6 ( x4 + x5 ) + 0.6 ( x4 - x5 ) + 1 =
+ 1.2 x4 + 1 >= 1.2 ( 0.6 x2 ) + 1 = 0.72 x2 + 1
+ = 0.72 x0 + 0.72 x1 + 1 : [-0.44, 2.44]
+ x10.ub = ( 0.76 x6 + 0.672 ) + ( 0.6 x7 + 0.8 ) + 1 = 0.76 x6 + 0.6 x7 + 2.472
+ >= 0.76 ( x4 + x5 ) + 0.6 ( x4 - x5 ) + 2.472 = 1.36 x4 + 0.16 x5 + 2.472
+ >= 1.36 ( 0.6 x2 + 0.8 ) + 0.16 ( 0.6 x3 + 0.8 ) + 2.472
+ = 0.816 x2 + 0.096 x3 + 3.688 = 0.912 x0 + 0.72 x1 + 3.688 : [2.056, 5.32]
+ x10 range: [-0.44, 5.32]
+
+ Layer 4:
+ Using x10 = x8 + x9 + 1, x11 = x9:
+ x8 + x9 + 1 <= x10 <= x8 + x9 + 1
+ x9 <= x11 <= x9
+
+ Layer 3:
+ Using 0.6 x6 <= x8 <= 0.76 x6 + 0.672, 0.6 x7 <= x9 <= 0.6 x7 + 0.8:
+ 0.6 x6 + 0.6 x7 + 1 <= x10 <= 0.76 x6 + 0.6 x7 + 2.472
+ 0.6
x7 <= x11 <= 0.6 x7 + 0.8 + + Layer 2: + Using x6 = x4 + x5, x7 = x4 - x5: + 1.2 x4 + 1 <= x10 <= 1.36 x4 + 0.16 x5 + 2.472 + 0.6 x4 - 0.6 x5 <= x11 <= 0.6 x4 - 0.6 x5 + 0.8 + + Layer 1: + Using 0.6 x2 <= x4 <= 0.6 x2 + 0.8, 0.6 x3 <= x5 <= 0.6 x3 + 0.8: + 0.72 x2 + 1 <= x10 <= 0.816 x2 + 0.096 x3 + 3.688 + 0.36 x2 - 0.36 x3 - 0.48 <= x11 <= 0.36 x2 - 0.36 x3 + 1.28 + + Layer 0: + Using x2 = x0 + x1, x3 = x0 - x1: + 0.72 x0 + 0.72 x1 + 1 <= x10 <= 0.912 x0 + 0.72 x1 + 3.688 + 0.72 x1 - 0.48 <= x11 <= 0.72 x1 + 1.28 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0.6, 0.6 } ), + Vector( { 0.6, 0.6 } ), + Vector( { 0, 0 } ), + Vector( { 0.8, 0.8 } ) ); + + comparePredecessorSymbolicBounds( nlr, + 4, + Vector( { 0.6, 0.6 } ), + Vector( { 0.76, 0.6 } ), + Vector( { 0, 0 } ), + Vector( { 0.672, 0.8 } ) ); + + compareOutputSymbolicBounds( nlr, + 5, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 1, 0, 1, 1 } ), + Vector( { 1, 0, 1, 1 } ), + Vector( { 1, 0 } ), + Vector( { 1, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 0.6, 0, 0.6, 0.6 } ), + Vector( { 0.76, 0, 0.6, 0.6 } ), + Vector( { 1, 0 } ), + Vector( { 2.472, 0.8 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1.2, 0.6, 0, -0.6 } ), + Vector( { 1.36, 0.6, 0.16, -0.6 } ), + Vector( { 1, 0 } ), + Vector( { 2.472, 0.8 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0.72, 0.36, 0, -0.36 } ), + Vector( { 0.816, 0.36, 0.096, -0.36 } ), + Vector( { 1, -0.48 } ), + Vector( { 3.688, 1.28 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0.72, 0, 0.72, 0.72 } ), + Vector( { 0.912, 0, 0.72, 0.72 } ), + Vector( { 1, -0.48 } ), + Vector( { 3.688, 1.28 } ) ); + + // Non-fixed activation neurons: x4 (LEAKY_RELU), x5 (LEAKY_RELU), x8 (LEAKY_RELU), x9 + // (LEAKY_RELU). 
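+
+ /*
+ Double-check with Python (illustrative sketch only, not part of the test and not
+ the NetworkLevelReasoner implementation; it re-derives the parameterised LeakyReLU
+ relaxation quoted in the comments above, with slope 0.2 and lower coefficient
+ alpha = 0.5, and samples each interval to confirm the lines bracket the activation):
+ ---
+ def relaxation(l, u, slope=0.2, alpha=0.5):
+     lower_coef = (1 - slope) * alpha + slope
+     upper_coef = (u - slope * l) / (u - l)
+     upper_bias = (slope - 1) * u * l / (u - l)
+     return lower_coef, upper_coef, upper_bias
+
+ def leaky_relu(x, slope=0.2):
+     return x if x >= 0 else slope * x
+
+ for l, u in [(-2.0, 2.0), (-1.2, 2.8)]:
+     lc, uc, ub = relaxation(l, u)
+     print(lc, uc, ub)  # (0.6, 0.6, 0.8) and (0.6, 0.76, 0.672)
+     xs = [l + i * (u - l) / 1000 for i in range(1001)]
+     assert all(lc * x <= leaky_relu(x) + 1e-9 for x in xs)
+     assert all(leaky_relu(x) <= uc * x + ub + 1e-9 for x in xs)
+ ---
+ */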
+ compareNonfixedNeurons( nlr, + Set( { NLR::NeuronIndex( 2, 0 ), + NLR::NeuronIndex( 2, 1 ), + NLR::NeuronIndex( 4, 0 ), + NLR::NeuronIndex( 4, 1 ) } ) ); + } + + void test_parameterised_symbolic_bound_maps_sigmoids_and_round() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSigmoidsAndRound( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + // Layer 1 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 0 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 0 ), 2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 1 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 1 ), 2, 0.00001 ) ); + + // Layer 2 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 0 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 0 ), 0.8807, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 1 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 1 ), 0.8807, 0.0001 ) ); + + // Layer 3 + /* + Double-check with Python + --- + from math import exp as e + def g(x): + return 1 / (1 + e(-x)) + + def g_prime(x): + return g(x) * (1 - g(x)) + + def lam(l, u): + return (g(u) - g(l)) / (u - l) + + def lam_prime(l, u): + return min(g_prime(l), g_prime(u)) + + l3 = l4 = -2 + u3 = u4 = 2 + l5 = l6 = g(-2) + u5 = u6 = g(2) + lambda7 = lam(l3, u3) + lambda7_prime = lam_prime(l3, u3) + lambda8 = lam(l4, u4) + lambda8_prime = lam_prime(l4, u4) + x7_l = lambda7_prime * (-2) + g(-2) + g(-2) - lambda7_prime * (-2 + -2) + x7_u = lambda7_prime * (2) + g(2) + g(2) -lambda7_prime * (2 + 2) + x8_l = lambda8_prime * (-2) + g(-2) - g(2) - lambda8_prime * (-2 - 2) + x8_u = lambda8_prime * (2) + g(2) - g(-2) -lambda8_prime * (2 - -2) + print(x7_l) + print(x7_u) + print(x8_l) + print(x8_u) + + ''' + Sigmoid linear relaxation ( Layer 2 ): + x4 >= lambda7_prime * x2 + ( g(l3) - lambda7_prime * l3 ) + x4 <= lambda7_prime * x2 + ( g(u3) - lambda7_prime * u3 ) + x5 >= lambda8_prime * x3 + ( g(l4) - lambda8_prime * l4 ) + x5 <= lambda8_prime * x3 + ( g(u4) - lambda8_prime * u4 ) + ''' + print('------------------') + print(lambda7_prime) + print(lambda8_prime) + print(g(l3) - lambda7_prime * l3) + print(g(u3) - lambda7_prime * u3) + print(g(l4) - lambda8_prime * l4) + print(g(u4) - lambda8_prime * u4) + + --- + [output]: + 0.4483930148512481 + 1.5516069851487517 + -0.5516069851487517 + 0.5516069851487517 + ------------------ + 0.1049935854035065 + 0.1049935854035065 + 0.3291900928291306 + 0.6708099071708693 + 0.3291900928291306 + 0.6708099071708693 + */ + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 0 ), 0.4483, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 0 ), 1.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 1 ), -0.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 1 ), 0.5516, 
0.0001 ) ); + + // Layer 4 + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 0 ), 0 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 0 ), 2 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 1 ), -1 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 1 ), 1 ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (SIGMOID): + 0.1050 x2 + 0.3292 <= x4 <= 0.1050 x2 + 0.6708 + 0.1050 x3 + 0.3292 <= x5 <= 0.1050 x3 + 0.6708 + + Layer 4 (ROUND): + x6 - 0.5 <= x8 <= x6 + 0.5 + x7 - 0.5 <= x9 <= x7 + 0.5 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 4: + x8 <= x8 <= x8 + x9 <= x9 <= x9 + + Layer 3: + Using x6 - 0.5 <= x8 <= x6 + 0.5, x7 - 0.5 <= x9 <= x7 + 0.5: + x6 - 0.5 <= x8 <= x6 + 0.5 + x7 - 0.5 <= x9 <= x7 + 0.5 + + Layer 2: + Using x6 = x4 + x5, x7 = x4 - x5: + x4 + x5 - 0.5 <= x8 <= x4 + x5 + 0.5 + x4 - x5 - 0.5 <= x9 <= x4 - x5 + 0.5 + + Layer 1: + Using + 0.1050 x2 + 0.3292 <= x4 <= 0.1050 x2 + 0.6708, + 0.1050 x3 + 0.3292 <= x5 <= 0.1050 x3 + 0.6708: + 0.1050 x2 + 0.1050 x3 + 0.1584 <= x8 <= 0.1050 x2 + 0.1050 x3 + 1.8416 + 0.1050 x2 - 0.1050 x3 - 0.8416 <= x9 <= 0.1050 x2 - 0.1050 x3 + 0.8516 + + Layer 0: + Using x2 = x0 + x1, x3 = x0 - x1: + 0.2100 x0 + 0.1584 <= x8 <= 0.2100 x0 + 1.8416 + 0.2100 x1 - 0.8416 <= x9 <= 0.2100 x1 + 0.8516 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0.1050, 0.1050 } ), + Vector( { 0.1050, 0.1050 } ), + Vector( { 0.3292, 0.3292 } ), + Vector( { 0.6708, 0.6708 } ) ); + comparePredecessorSymbolicBounds( nlr, + 4, + Vector( { 1, 1 } ), + Vector( { 1, 1 } ), + Vector( { -0.5, -0.5 } ), + Vector( { 0.5, 0.5 } ) ); + + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { -0.5, -0.5 } ), + Vector( { 0.5, 0.5 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, 1, 1, -1 } ), + Vector( { 1, 1, 1, -1 } ), + Vector( { -0.5, -0.5 } ), + Vector( { 0.5, 0.5 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0.1050, 0.1050, 0.1050, -0.1050 } ), + Vector( { 0.1050, 0.1050, 0.1050, -0.1050 } ), + Vector( { 0.1584, -0.8416 } ), + Vector( { 1.8416, 0.8416 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0.2100, 0, 0, 0.2100 } ), + Vector( { 0.2100, 0, 0, 0.2100 } ), + Vector( { 0.1584, -0.8416 } ), + Vector( { 1.8416, 0.8416 } ) ); + + // Non-fixed activation neurons: x4 (SIGMOID), x5 (SIGMOID), x8 (ROUND), x9 (ROUND). 
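+
+ /*
+ Double-check with Python (illustrative sketch only, not part of the test; it
+ composes the tangent-line sigmoid relaxation from the script above with the
+ Round relaxation x - 0.5 <= round(x) <= x + 0.5 to reproduce the output-layer
+ biases asserted below):
+ ---
+ from math import exp
+
+ def g(x):
+     return 1 / (1 + exp(-x))
+
+ l, u = -2.0, 2.0
+ lam = min(g(l) * (1 - g(l)), g(u) * (1 - g(u)))  # 0.1050, tangent slope
+ lower_bias = g(l) - lam * l                      # 0.3292
+ upper_bias = g(u) - lam * u                      # 0.6708
+ # x8 = round( x4 + x5 ) and x9 = round( x4 - x5 ):
+ print(2 * lower_bias - 0.5)                      # 0.1584
+ print(2 * upper_bias + 0.5)                      # 1.8416
+ print(lower_bias - upper_bias - 0.5)             # -0.8416
+ print(upper_bias - lower_bias + 0.5)             # 0.8416
+ ---
+ */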
+ compareNonfixedNeurons( nlr,
+ Set( { NLR::NeuronIndex( 2, 0 ),
+ NLR::NeuronIndex( 2, 1 ),
+ NLR::NeuronIndex( 4, 0 ),
+ NLR::NeuronIndex( 4, 1 ) } ) );
+ }
+
+ void test_parameterised_symbolic_bound_maps_max_not_fixed()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkSBTMax( nlr, tableau );
+
+ tableau.setLowerBound( 0, -1 );
+ tableau.setUpperBound( 0, 1 );
+ tableau.setLowerBound( 1, -1 );
+ tableau.setUpperBound( 1, 2 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector coeffs( paramCount, 0.5 );
+
+ // Invoke Parameterised DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) );
+
+ /*
+ Input ranges:
+
+ x0: [-1, 1]
+ x1: [-1, 2]
+
+ Layers 1, 2, 3:
+
+ x2 = x0 + x1
+ x2.lb = x0 + x1 : [-2, 3]
+ x2.ub = x0 + x1 : [-2, 3]
+
+ x3 = x0 - x1
+ x3.lb = x0 - x1 : [-3, 2]
+ x3.ub = x0 - x1 : [-3, 2]
+
+ Both ReLUs are undecided, bounds are concretized. Using custom ReLU lower
+ coefficient of 0.5.
+ Upper coefficient (first ReLU): 3/( 3--2 ) = 3/5 = 0.6.
+ Upper coefficient (second ReLU): 2/( 2--3 ) = 2/5 = 0.4
+
+ 0.5 x2 <= x4 <= 0.6 x2 + 1.2
+ x4.lb = 0.5 ( x0 + x1 ) = 0.5 x0 + 0.5 x1
+ x4.ub = 0.6 ( x0 + x1 ) + 1.2 = 0.6x0 + 0.6x1 + 1.2
+ x4 range: [-1, 3]
+
+ 0.5 x3 <= x5 <= 0.4 x3 + 1.2
+ x5.lb = 0.5 ( x0 - x1 ) = 0.5 x0 - 0.5 x1
+ x5.ub = 0.4 ( x0 - x1 ) + 1.2 = 0.4x0 - 0.4x1 + 1.2
+ x5 range: [-1.5, 2]
+
+ Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub
+ Max inherits lower bound from x4, and its upper bound is constant 3.
+
+ x4 <= x6 <= 3
+ x6.lb = 0.5 x0 + 0.5 x1 : [-1, 1.5]
+ x6.ub = 3 : [3, 3]
+ x6 range: [-1, 3]
+
+ Layer 4:
+
+ x7 = 2x6
+ => 2x4 <= x7 <= 6
+ x7.lb = 2 ( 0.5 x0 + 0.5 x1 ) = x0 + x1 : [-2, 3]
+ x7.ub = 2 ( 3 ) = 6 : [6, 6]
+ x7 range: [-2, 6]
+ */
+
+ List expectedBounds( {
+ Tightening( 2, -2, Tightening::LB ),
+ Tightening( 2, 3, Tightening::UB ),
+ Tightening( 3, -3, Tightening::LB ),
+ Tightening( 3, 2, Tightening::UB ),
+ Tightening( 4, -1, Tightening::LB ),
+ Tightening( 4, 3, Tightening::UB ),
+ Tightening( 5, -1.5, Tightening::LB ),
+ Tightening( 5, 2, Tightening::UB ),
+ Tightening( 6, -1, Tightening::LB ),
+ Tightening( 6, 3, Tightening::UB ),
+ Tightening( 7, -2, Tightening::LB ),
+ Tightening( 7, 6, Tightening::UB ),
+
+ } );
+
+ List bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+ /*
+ Symbolic bounds of every activation layer in terms of predecessor:
+
+ Layer 2 (RELU):
+ 0.5 x2 <= x4 <= 0.6 x2 + 1.2
+ 0.5 x3 <= x5 <= 0.4 x3 + 1.2
+
+ Layer 3 (MAX):
+ x4 <= x6 <= 3
+
+ Symbolic bounds of output layer in terms of every layer (backsubstitution):
+
+ Layer 4:
+ x7 <= x7 <= x7
+
+ Layer 3:
+ Using x7 = 2x6:
+ 2x6 <= x7 <= 2x6
+
+ Layer 2:
+ Using x4 <= x6 <= 3:
+ 2x4 <= x7 <= 6
+
+ Layer 1:
+ Using 0.5 x2 <= x4 <= 0.6 x2 + 1.2:
+ x2 <= x7 <= 6
+
+ Layer 0:
+ Using x2 = x0 + x1:
+ x0 + x1 <= x7 <= 6
+ */
+ comparePredecessorSymbolicBounds( nlr,
+ 2,
+ Vector( { 0.5, 0.5 } ),
+ Vector( { 0.6, 0.4 } ),
+ Vector( { 0, 0 } ),
+ Vector( { 1.2, 1.2 } ) );
+ comparePredecessorSymbolicBounds( nlr,
+ 3,
+ Vector( { 1, 0 } ),
+ Vector( { 0, 0 } ),
+ Vector( { 0 } ),
+ Vector( { 3 } ) );
+
+ compareOutputSymbolicBounds( nlr,
+ 4,
+ Vector( { 1 } ),
+ Vector( { 1 } ),
+ Vector( { 0 } ),
+ Vector( { 0 } ) );
+
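+ /*
+ Double-check with Python (illustrative sketch only, not part of the test and not
+ the NetworkLevelReasoner implementation; it re-derives the parameterised ReLU
+ relaxation used in the comments above, with lower coefficient alpha = 0.5 and the
+ usual DeepPoly upper chord):
+ ---
+ def relu_relaxation(l, u, alpha=0.5):
+     return alpha, u / (u - l), -u * l / (u - l)
+
+ print(relu_relaxation(-2.0, 3.0))  # (0.5, 0.6, 1.2) for x4
+ print(relu_relaxation(-3.0, 2.0))  # (0.5, 0.4, 1.2) for x5
+
+ for l, u in [(-2.0, 3.0), (-3.0, 2.0)]:
+     lo, hi, b = relu_relaxation(l, u)
+     xs = [l + i * (u - l) / 1000 for i in range(1001)]
+     assert all(lo * x <= max(x, 0.0) <= hi * x + b + 1e-9 for x in xs)
+ ---
+ With x4 in [-1, 3] and x5 in [-1.5, 2] neither Max input dominates, so, as noted
+ above, the Max relaxation keeps x4 as the symbolic lower bound and the constant
+ max( 3, 2 ) = 3 as the upper bound.
+ */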
compareOutputSymbolicBounds( nlr, + 3, + Vector( { 2 } ), + Vector( { 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 2, 0 } ), + Vector( { 0, 0 } ), + Vector( { 0 } ), + Vector( { 6 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 1, 0 } ), + Vector( { 0, 0 } ), + Vector( { 0 } ), + Vector( { 6 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 1, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0 } ), + Vector( { 6 } ) ); + + // Non-fixed activation neurons: x4 (RELU), x5 (RELU), x6 (MAX). + compareNonfixedNeurons( nlr, + Set( { NLR::NeuronIndex( 2, 0 ), + NLR::NeuronIndex( 2, 1 ), + NLR::NeuronIndex( 3, 0 ) } ) ); + } + + void test_parameterised_symbolic_bound_maps_max_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTMax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -3 ); + tableau.setUpperBound( 1, -2 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); + + /* + Input ranges: + + x0: [1, 2] + x1: [-3, -2] + + Layer 1: + + x2 = x0 + x1 + x2.lb = x0 + x1 : [-2, 0] + x2.ub = x0 + x1 : [-2, 0] + + x3 = x0 - x1 + x3.lb = x0 - x1 : [3, 5] + x3.ub = x0 - x1 : [3, 5] + + First ReLU is negative, bounds become constant 0 + Second ReLU is positive, bounds survive the activation + + 0 <= x4 <= 0 + x4: all set to 0 + + x3 <= x5 <= x3 + x5.lb = x0 - x1 : [3, 5] + x5.ub = x0 - x1 : [3, 5] + + Max is fixed because x5.lb > x4.ub, it inherits x5's bounds + + x5 <= x6 <= x5 + => x3 <= x6 <= x5 + x6.lb = x0 - x1 : [3, 5] + x6.ub = x0 - x1 : [3, 5] + + Layer 3: + + x7 = 2x6 + => x7 = 2x5 = 2x3 = 2x0 - 2x1 + x7.lb = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] + x7.ub = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] + */ + + List expectedBounds( { + Tightening( 2, -2, Tightening::LB ), + Tightening( 2, 0, Tightening::UB ), + Tightening( 3, 3, Tightening::LB ), + Tightening( 3, 5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 3, Tightening::LB ), + Tightening( 5, 5, Tightening::UB ), + Tightening( 6, 3, Tightening::LB ), + Tightening( 6, 5, Tightening::UB ), + Tightening( 7, 6, Tightening::LB ), + Tightening( 7, 10, Tightening::UB ), + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (RELU): + 0 <= x4 <= 0 + x3 <= x5 <= x3 + + Layer 3 (MAX): + x5 <= x6 <= x5 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 4: + x7 <= x7 <= x7 + + Layer 3: + Using x7 = 2x6: + 2x6 <= x7 <= 2x6 + + Layer 2: + Using x5 <= x6 <= x5: + 2x5 <= x7 <= 2x5 + + Layer 1: + Using x3 <= x5 <= x3: + 2x3 <= x7 <= 2x3 + + Layer 0: + Using x3 = x0 - x1 + 2x0 - 2x1 <= x7 <= 2x0 - 2x1 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + comparePredecessorSymbolicBounds( nlr, + 3, + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 4, + 
Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 2 } ), + Vector( { 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 0, 2 } ), + Vector( { 0, 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, 2 } ), + Vector( { 0, 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 2, -2 } ), + Vector( { 2, -2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + + // Non-fixed activation neurons: None. + compareNonfixedNeurons( nlr, Set( {} ) ); + } + + void test_parameterised_symbolic_bound_maps_softmax1() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); + } + + void test_parameterised_symbolic_bound_maps_softmax2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + { + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + + Layer 1: + + x3 = x0 - x1 + x2 + 1 + x3.lb = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3.ub = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3 range: [ 1.999999, 2.000002 ] + + x4 = -x0 + x1 + x2 + 2 + x4.lb = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4.ub = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4 range: [ 2.999999, 3.000002 ] + + x5 = -x0 - x1 - x2 + 3 + x5.lb = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5.ub = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5 range: [ -0.000003, 0 ] + */ + + unsigned size = nlr.getLayer( 2 )->getSize(); + Vector sourceLbs = { 1.999899, 2.999899, -0.000003 }; + Vector sourceUbs = { 2.000102, 3.000102, 0.0001 }; + Vector sourceMids = { 2.0000005, 3.0000005, -0.0000015 }; + Vector targetLbs( size, 0 ); + Vector targetUbs( size, 0 ); + Vector symbolicLb( size * size, 0 ); + Vector symbolicUb( size * size, 0 ); + Vector symbolicLowerBias( size, 0 ); + Vector symbolicUpperBias( size, 0 ); + for ( unsigned i = 0; i < size; ++i ) + { + targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i ); + targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i ); + } + for ( unsigned i = 0; i < size; ++i ) + { + symbolicLowerBias[i] = + NLR::Layer::LSELowerBound2( sourceMids, 
sourceLbs, sourceUbs, i ); // Using lse2 + symbolicUpperBias[i] = + NLR::Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, i ); + for ( unsigned j = 0; j < size; ++j ) + { + symbolicLb[size * j + i] = + NLR::Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, i, j ); + symbolicUb[size * j + i] = + NLR::Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, i, j ); + symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j]; + symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j]; + } + } + TS_ASSERT( compareVectors( targetLbs, Vector( { 0.2595, 0.7054, 0.0351 } ) ) ); + TS_ASSERT( compareVectors( targetUbs, Vector( { 0.2595, 0.7054, 0.0351 } ) ) ); + TS_ASSERT( compareVectors( symbolicLb, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ) ) ); + TS_ASSERT( compareVectors( symbolicUb, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ) ) ); + TS_ASSERT( + compareVectors( symbolicLowerBias, Vector( { 0.4243, 0.4481, 0.1277 } ) ) ); + TS_ASSERT( + compareVectors( symbolicUpperBias, Vector( { 0.4243, 0.4480, 0.1277 } ) ) ); + + /* + Layer 2: + + 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 + x6.lb = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 + x6.ub = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 + x6 range: [ 0.2595, 0.2595 ] + + -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4480 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 + x7.lb = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6062 + x7.ub = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6063 + x7 range: [ 0.7054, 0.7054 ] + + -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 + x8.lb = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707 + x8.ub = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707 + x8 range: [ 0.0351, 0.0351 ] + + Layer 3: + + x9 = x6 + x7 + x8 + => x9 = ( 0.1922 - 0.1830 - 0.0091 ) x3 + ( -0.1830 + 0.2078 - 0.0248 ) x4 + ( + -0.0091 - 0.0248 + 0.0339 ) x5 + ( 0.4243 + 0.4481 + 0.1277 ) + + => x9 = 0.0001 x3 + 0 x4 + 0 x5 + 1.0001 + => ( Up to rounding ) 1 <= x9 <= 1. + x9.lb = 1 + x9.ub = 1 + x9 range: [ 1, 1 ] + + x10 = - x6 - x7 - x8 + => x10 = - ( 0.1922 - 0.1830 - 0.0091 ) x3 - ( -0.1830 + 0.2078 - 0.0248 ) x4 - ( + -0.0091 - 0.0248 + 0.0339 ) x5 - ( 0.4243 + 0.4481 + 0.1277 ) + + => x10 = - 0.0001 x3 - 0.0000 x4 - 0.0000 x5 - 1.0001 + => ( Up to rounding ) 1 <= x10 <= 1. 
+ x10.lb = 1 + x10.ub = 1 + x10 range: [ -1, -1 ] + */ + + List expectedBounds( { Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, 0.2595, Tightening::LB ), + Tightening( 6, 0.2595, Tightening::UB ), + Tightening( 7, 0.7054, Tightening::LB ), + Tightening( 7, 0.7054, Tightening::UB ), + Tightening( 8, 0.0351, Tightening::LB ), + Tightening( 8, 0.0351, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), + Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), + Tightening( 10, -1, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (SOFTMAX): + 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 + -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 + -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x9 <= x9 <= x9 + x10 <= x10 <= x10 + + Layer 2: + Using x9 = x6 + x7 + x8, x10 = -x6 - x7 - x8: + x6 + x7 + x8 <= x9 <= x6 + x7 + x8 + -x6 - x7 - x8 <= x10 <= -x6 - x7 - x8 + + Layer 1: + Using + 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243. + -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + + 0.4481. -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + + 0.1277: 1 <= x9 <= 1 -1 <= x10 <= -1 + + Layer 0: + 1 <= x9 <= 1 + -1 <= x10 <= -1 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ), + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ), + Vector( { 0.4243, 0.4481, 0.1277 } ), + Vector( { 0.4243, 0.4480, 0.1277 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1, 1, -1, 1, -1 } ), + Vector( { 1, -1, 1, -1, 1, -1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 1, -1 } ), + Vector( { 1, -1 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 1, -1 } ), + Vector( { 1, -1 } ) ); + + // Non-fixed activation neurons: x6 (SOFTMAX), x7 (SOFTMAX), x8 (SOFTMAX). 
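+
+ /*
+ Double-check with Python (illustrative sketch only, not part of the test and not
+ the LSELowerBound2/LSEUpperBound implementation; because the input box is nearly a
+ single point, the LSE bounds above agree, up to rounding, with the tangent plane of
+ softmax at the midpoint ( 2, 3, 0 ), whose coefficients are the softmax Jacobian
+ entries s_i * ( delta_ij - s_j )):
+ ---
+ from math import exp
+
+ mids = [2.0, 3.0, 0.0]
+ exps = [exp(m) for m in mids]
+ s = [e / sum(exps) for e in exps]  # [0.2595, 0.7054, 0.0351]
+ jac = [[s[i] * ((1.0 if i == j else 0.0) - s[j]) for j in range(3)]
+        for i in range(3)]
+ bias = [s[i] - sum(jac[i][j] * mids[j] for j in range(3)) for i in range(3)]
+ print([round(v, 4) for v in s])
+ print([[round(v, 4) for v in row] for row in jac])  # ~symbolicLb / symbolicUb rows
+ print([round(v, 4) for v in bias])                  # ~[0.4243, 0.4480, 0.1277]
+ ---
+ */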
+ compareNonfixedNeurons( nlr, + Set( { NLR::NeuronIndex( 2, 0 ), + NLR::NeuronIndex( 2, 1 ), + NLR::NeuronIndex( 2, 2 ) } ) ); + } + { + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "er" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + + Layer 1: + + x3 = x0 - x1 + x2 + 1 + x3.lb = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3.ub = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3 range: [ 1.999999, 2.000002 ] + + x4 = -x0 + x1 + x2 + 2 + x4.lb = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4.ub = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4 range: [ 2.999999, 3.000002 ] + + x5 = -x0 - x1 - x2 + 3 + x5.lb = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5.ub = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5 range: [ -0.000003, 0 ] + */ + + unsigned size = nlr.getLayer( 2 )->getSize(); + Vector sourceLbs = { 1.999899, 2.999899, -0.000003 }; + Vector sourceUbs = { 2.000102, 3.000102, 0.0001 }; + Vector sourceMids = { 2.0000005, 3.0000005, -0.0000015 }; + Vector targetLbs( size, 0 ); + Vector targetUbs( size, 0 ); + Vector symbolicLb( size * size, 0 ); + Vector symbolicUb( size * size, 0 ); + Vector symbolicLowerBias( size, 0 ); + Vector symbolicUpperBias( size, 0 ); + for ( unsigned i = 0; i < size; ++i ) + { + targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i ); + targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i ); + } + for ( unsigned i = 0; i < size; ++i ) + { + symbolicLowerBias[i] = + NLR::Layer::ERLowerBound( sourceMids, sourceLbs, sourceUbs, i ); // Using er + symbolicUpperBias[i] = + NLR::Layer::ERUpperBound( sourceMids, targetLbs, targetUbs, i ); + for ( unsigned j = 0; j < size; ++j ) + { + symbolicLb[size * j + i] = + NLR::Layer::dERLowerBound( sourceMids, sourceLbs, sourceUbs, i, j ); + symbolicUb[size * j + i] = + NLR::Layer::dERUpperBound( sourceMids, targetLbs, targetUbs, i, j ); + symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j]; + symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j]; + } + } + TS_ASSERT( compareVectors( targetLbs, Vector( { 0.2595, 0.7054, 0.0351 } ) ) ); + TS_ASSERT( compareVectors( targetUbs, Vector( { 0.2595, 0.7054, 0.0351 } ) ) ); + TS_ASSERT( compareVectors( symbolicLb, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ) ) ); + TS_ASSERT( compareVectors( symbolicUb, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ) ) ); + TS_ASSERT( + compareVectors( symbolicLowerBias, Vector( { 0.4243, 0.4481, 0.1277 } ) ) ); + TS_ASSERT( + compareVectors( symbolicUpperBias, Vector( { 0.4243, 0.4480, 0.1277 } ) ) ); + + /* + Layer 2: + + 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 + x6.lb = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 + x6.ub = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 + x6 range: [ 
0.2595, 0.2595 ] + + -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4480 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 + x7.lb = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6062 + x7.ub = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6063 + x7 range: [ 0.7054, 0.7054 ] + + -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 + x8.lb = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707 + x8.ub = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707 + x8 range: [ 0.0351, 0.0351 ] + + Layer 3: + + x9 = x6 + x7 + x8 + => x9 = ( 0.1922 - 0.1830 - 0.0091 ) x3 + ( -0.1830 + 0.2078 - 0.0248 ) x4 + ( + -0.0091 - 0.0248 + 0.0339 ) x5 + ( 0.4243 + 0.4481 + 0.1277 ) + + => x9 = 0.0001 x3 + 0 x4 + 0 x5 + 1.0001 + => ( Up to rounding ) 1 <= x9 <= 1. + x9.lb = 1 + x9.ub = 1 + x9 range: [ 1, 1 ] + + x10 = - x6 - x7 - x8 + => x10 = - ( 0.1922 - 0.1830 - 0.0091 ) x3 - ( -0.1830 + 0.2078 - 0.0248 ) x4 - ( + -0.0091 - 0.0248 + 0.0339 ) x5 - ( 0.4243 + 0.4481 + 0.1277 ) + + => x10 = - 0.0001 x3 - 0.0000 x4 - 0.0000 x5 - 1.0001 + => ( Up to rounding ) 1 <= x10 <= 1. + x10.lb = 1 + x10.ub = 1 + x10 range: [ -1, -1 ] + */ + List expectedBounds( { Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, 0.2595, Tightening::LB ), + Tightening( 6, 0.2595, Tightening::UB ), + Tightening( 7, 0.7054, Tightening::LB ), + Tightening( 7, 0.7054, Tightening::UB ), + Tightening( 8, 0.0351, Tightening::LB ), + Tightening( 8, 0.0351, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), + Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), + Tightening( 10, -1, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (SOFTMAX): + 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 + -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 + -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x9 <= x9 <= x9 + x10 <= x10 <= x10 + + Layer 2: + Using x9 = x6 + x7 + x8, x10 = -x6 - x7 - x8: + x6 + x7 + x8 <= x9 <= x6 + x7 + x8 + -x6 - x7 - x8 <= x10 <= -x6 - x7 - x8 + + Layer 1: + Using + 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243. + -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + + 0.4481. 
-0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + + 0.1277: 1 <= x9 <= 1 -1 <= x10 <= -1 + + Layer 0: + 1 <= x9 <= 1 + -1 <= x10 <= -1 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ), + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ), + Vector( { 0.4243, 0.4481, 0.1277 } ), + Vector( { 0.4243, 0.4480, 0.1277 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1, 1, -1, 1, -1 } ), + Vector( { 1, -1, 1, -1, 1, -1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 1, -1 } ), + Vector( { 1, -1 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 1, -1 } ), + Vector( { 1, -1 } ) ); + + // Non-fixed activation neurons: x6 (SOFTMAX), x7 (SOFTMAX), x8 (SOFTMAX). + compareNonfixedNeurons( nlr, + Set( { NLR::NeuronIndex( 2, 0 ), + NLR::NeuronIndex( 2, 1 ), + NLR::NeuronIndex( 2, 2 ) } ) ); + } + } + + void test_parameterised_symbolic_bound_maps_softmax3() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax2( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.00001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.00001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.00001 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + + Layer 1: + + x3 = x0 - x1 + x2 + 1 + x3.lb = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3.ub = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3 range: [ 1.999999, 2.000002 ] + + x4 = -x0 + x1 + x2 + 2 + x4.lb = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4.ub = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4 range: [ 2.999999, 3.000002 ] + + x5 = -x0 - x1 - x2 + 3 + x5.lb = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5.ub = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5 range: [ -0.000003, 0 ] + + x6 = -x0 - x1 - x2 + 2 + x6.lb = -x0 - x1 - x2 + 2 : [ -1.000003, -1 ] + x6.ub = -x0 - x1 - x2 + 2 : [ -1.000003, -1 ] + x6 range: [ -1.000003, -1 ] + + x7 = -x0 - x1 - x2 + 1 + x7.lb = -x0 - x1 - x2 + 1 : [ -2.000003, -2 ] + x7.ub = -x0 - x1 - x2 + 1 : [ -2.000003, -2 ] + x7 range: [ -2.000003, -2 ] + */ + + // First Sigmoid: x8 x10 x12 = softmax( x3, x5, x7 ). 
+ unsigned size = nlr.getLayer( 2 )->getActivationSources( 0 ).size(); + Vector sourceLbs = { 1.999899, -0.000003, -2.000103 }; + Vector sourceUbs = { 2.000102, 0.0001, -1.999 }; + Vector sourceMids = { 2.0000005, -0.0000015, -2.0000015 }; + Vector targetLbs( size, 0 ); + Vector targetUbs( size, 0 ); + Vector symbolicLb( size * size, 0 ); + Vector symbolicUb( size * size, 0 ); + Vector symbolicLowerBias( size, 0 ); + Vector symbolicUpperBias( size, 0 ); + for ( unsigned i = 0; i < size; ++i ) + { + targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i ); + targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i ); + } + for ( unsigned i = 0; i < size; ++i ) + { + symbolicLowerBias[i] = + NLR::Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, i ); // Using lse2 + symbolicUpperBias[i] = NLR::Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, i ); + for ( unsigned j = 0; j < size; ++j ) + { + symbolicLb[size * j + i] = + NLR::Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, i, j ); + symbolicUb[size * j + i] = + NLR::Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, i, j ); + symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j]; + symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j]; + } + } + TS_ASSERT( compareVectors( targetLbs, Vector( { 0.8668, 0.1173, 0.0159 } ) ) ); + TS_ASSERT( compareVectors( targetUbs, Vector( { 0.8668, 0.1173, 0.0159 } ) ) ); + TS_ASSERT( compareVectors( symbolicLb, + Vector( { 0.1155, + -0.1017, + -0.0138, + -0.1017, + 0.1035, + -0.0019, + -0.0138, + -0.0019, + 0.0156 } ) ) ); + TS_ASSERT( compareVectors( symbolicUb, + Vector( { 0.1155, + -0.1017, + -0.0138, + -0.1017, + 0.1036, + -0.0019, + -0.0138, + -0.0019, + 0.0156 } ) ) ); + TS_ASSERT( + compareVectors( symbolicLowerBias, Vector( { 0.6084, 0.3170, 0.0747 } ) ) ); + TS_ASSERT( + compareVectors( symbolicUpperBias, Vector( { 0.6084, 0.3170, 0.0747 } ) ) ); + + // Second Sigmoid: x9 x11 = softmax( x4, x6 ). 
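+
+ /*
+ Double-check with Python (illustrative sketch only, not part of the test; a softmax
+ over two inputs reduces to the logistic function of their difference, so the
+ expected values and coefficients for x9 and x11 can be recovered from
+ sigma( x4 - x6 ) at the midpoint ( 3, -1 )):
+ ---
+ from math import exp
+
+ def sigma(t):
+     return 1 / (1 + exp(-t))
+
+ p = sigma(3.0 - (-1.0))   # 0.9820 for x9; 1 - p = 0.0180 for x11
+ d = p * (1 - p)           # 0.0177, magnitude of the x4 / x6 coefficients
+ print(round(p, 4), round(1 - p, 4), round(d, 4))
+ print(round(p - d * 3.0 - d * 1.0, 4))  # ~0.9114, the x9 bias
+ ---
+ */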
+ size = nlr.getLayer( 2 )->getActivationSources( 1 ).size(); + sourceLbs = Vector( { 2.999899, -1.000103 } ); + sourceUbs = Vector( { 3.000102, -0.9999 } ); + sourceMids = Vector( { 3.0000005, -1.0000015 } ); + targetLbs = Vector( size, 0 ); + targetUbs = Vector( size, 0 ); + symbolicLb = Vector( size * size, 0 ); + symbolicUb = Vector( size * size, 0 ); + symbolicLowerBias = Vector( size, 0 ); + symbolicUpperBias = Vector( size, 0 ); + for ( unsigned i = 0; i < size; ++i ) + { + targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i ); + targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i ); + } + for ( unsigned i = 0; i < size; ++i ) + { + symbolicLowerBias[i] = + NLR::Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, i ); // Using lse2 + symbolicUpperBias[i] = NLR::Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, i ); + for ( unsigned j = 0; j < size; ++j ) + { + symbolicLb[size * j + i] = + NLR::Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, i, j ); + symbolicUb[size * j + i] = + NLR::Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, i, j ); + symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j]; + symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j]; + } + } + TS_ASSERT( compareVectors( targetLbs, Vector( { 0.9820, 0.0180 } ) ) ); + TS_ASSERT( compareVectors( targetUbs, Vector( { 0.9820, 0.0180 } ) ) ); + TS_ASSERT( + compareVectors( symbolicLb, Vector( { 0.0177, -0.0177, -0.0177, 0.0177 } ) ) ); + TS_ASSERT( + compareVectors( symbolicUb, Vector( { 0.0177, -0.0177, -0.0177, 0.0177 } ) ) ); + TS_ASSERT( compareVectors( symbolicLowerBias, Vector( { 0.9114, 0.0886 } ) ) ); + TS_ASSERT( compareVectors( symbolicUpperBias, Vector( { 0.9114, 0.0886 } ) ) ); + + /* + Layer 2: + + First Sigmoid: x8 x10 x12 = softmax( x3, x5, x7 ). + 0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 + x8.lb = 0.2310 x0 + 0.0001 x1 + 0.2310 x2 + 0.4051 + x8.ub = 0.2310 x0 + 0.0000 x1 + 0.2310 x2 + 0.4050 + x8 range: [ 0.8668, 0.8668 ] + + -0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + + 0.3170 x10.lb = -0.2033 x0 + 0.0001 x1 - 0.2033 x2 + 0.5239 x10.ub = -0.2033 x0 + 0.0000 x1 - + 0.2033 x2 + 0.5241 x10 range: [ 0.1173, 0.1173 ] + + -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + + 0.0747 x12.lb = -0.0275 x0 + 0.0001 x1 - 0.0275 x2 + 0.0708 x12.ub = -0.0275 x0 + 0.0001 x1 - + 0.0275 x2 + 0.0708 x12 range: [ 0.0159, 0.0159 ] + + Second Sigmoid: x9 x11 = softmax( x4, x6 ). + 0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114 + x9.lb = 0 x0 + 0.0354 x1 + 0.0354 x2 + 0.9114 + x9.ub = 0 x0 + 0.0354 x1 + 0.0354 x2 + 0.9114 + x9 range: [ 0.9820, 0.0180 ] + + -0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886 + x11.lb = 0 x0 - 0.0354 x1 - 0.0354 x2 + 0.0886 + x11.ub = 0 x0 - 0.0354 x1 - 0.0354 x2 + 0.0886 + x11 range: [ 0.9820, 0.0180 ] + + Layer 3: + + x13 = x8 + x10 + x12 + => x13 = ( 0.1155 - 0.1017 - 0.0138 ) x3 + ( -0.1017 + 0.1035 - 0.0019 ) x5 + + ( -0.0138 - 0.0019 + 0.0156 ) x7 + ( 0.6084 + 0.3170 + 0.0747 ) + + => x13 = 0 x3 - 0.0001 x5 - 0.0001 x7 + 1.0001 + => ( Up to rounding ) 1 <= x13 <= 1. 
+ x13.lb = 1 + x13.ub = 1 + x13 range: [ 1, 1 ] + + x14 = - x8 - x10 - x12 + => x14 = - ( 0.1155 - 0.1017 - 0.0138 ) x3 - ( -0.1017 + 0.1035 - 0.0019 ) x5 + - ( -0.0138 - 0.0019 + 0.0156 ) x7 - ( 0.6084 + 0.3170 + 0.0747 ) + + => x14 = 0 x3 + 0.0001 x5 + 0.0001 x7 - 1.0001 + => ( Up to rounding ) -1 <= x14 <= -1. + x14.lb = -1 + x14.ub = -1 + x14 range: [ -1, -1 ] + + x15 = x9 + x11 + => x15 = ( 0.0177 - 0.0177 ) x4 + ( -0.0177 + 0.0177 ) x6 + ( 0.9114 + 0.0886 ) + + => x15 = 0 x4 + 0 x6 + 1 + => ( Up to rounding ) 1 <= x15 <= 1. + x15.lb = 1 + x15.ub = 1 + x15 range: [ 1, 1 ] + + x16 = - x9 - x11 + => x16 = - ( 0.0177 - 0.0177 ) x4 - ( -0.0177 + 0.0177 ) x6 - ( 0.9114 + 0.0886 ) + + => x16 = 0 x4 + 0 x6 - 1 + => ( Up to rounding ) -1 <= x16 <= -1. + x16.lb = -1 + x16.ub = -1 + x16 range: [ -1, -1 ] + */ + + List expectedBounds( { + Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ), + Tightening( 8, 0.86681, Tightening::LB ), Tightening( 8, 0.86682, Tightening::UB ), + Tightening( 9, 0.98201, Tightening::LB ), Tightening( 9, 0.98201, Tightening::UB ), + Tightening( 10, 0.11731, Tightening::LB ), Tightening( 10, 0.11731, Tightening::UB ), + Tightening( 11, 0.017985, Tightening::LB ), Tightening( 11, 0.017986, Tightening::UB ), + Tightening( 12, 0.015875, Tightening::LB ), Tightening( 12, 0.015876, Tightening::UB ), + Tightening( 13, 1, Tightening::LB ), Tightening( 13, 1, Tightening::UB ), + Tightening( 14, -1, Tightening::LB ), Tightening( 14, -1, Tightening::UB ), + Tightening( 15, 1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, -1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (SOFTMAX): + 0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 + 0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114 + -0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + + 0.3170 -0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886 -0.0138 x3 - + 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x13 <= x13 <= x13 + x14 <= x14 <= x14 + x15 <= x15 <= x15 + x16 <= x16 <= x16 + + Layer 2: + Using x13 = x8 + x10 + x12, x14 = -x8 - x10 - x12, x15 = x9 + x11, x16 = -x9 - x11: + x8 + x10 + x12 <= x13 <= x8 + x10 + x12 + -x8 - x10 - x12 <= x14 <= -x8 - x10 - x12 + x9 + x11 <= x15 <= x9 + x11 + -x9 - x11 <= x16 <= -x9 - x11 + + Layer 1: + Using + 0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 + 0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114 + -0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + + 0.3170 -0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886 -0.0138 x3 - + 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 1 <= x13 
<= + 1 -1 <= x14 <= -1 1 <= x15 <= 1 -1 <= x16 <= -1 + + Layer 0: + 1 <= x13 <= 1 + -1 <= x14 <= -1 + 1 <= x15 <= 1 + -1 <= x16 <= -1 + */ + comparePredecessorSymbolicBounds( + nlr, + 2, + Vector( { 0.1155, + 0.0177, + -0.1017, + -0.0177, + -0.0138, + -0.1017, + -0.0177, + 0.1035, + 0.0177, + -0.0019, + -0.0138, + 0.0000, + -0.0019, + 0.0000, + 0.0156 } ), + Vector( { 0.1155, + 0.0177, + -0.1017, + -0.0177, + -0.0138, + -0.1017, + -0.0177, + 0.1036, + 0.0177, + -0.0019, + -0.0138, + 0.0000, + -0.0019, + 0.0000, + 0.0156 } ), + Vector( { 0.6084, 0.9114, 0.3170, 0.0886, 0.0747 } ), + Vector( { 0.6084, 0.9114, 0.3170, 0.0886, 0.0747 } ) ); + + compareOutputSymbolicBounds( + nlr, + 3, + Vector( { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 } ), + Vector( { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 } ), + Vector( { 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0 } ) ); + compareOutputSymbolicBounds( + nlr, + 2, + Vector( { 1, -1, 0, 0, 0, 0, 1, -1, 1, -1, 0, 0, 0, 0, 1, -1, 1, -1, 0, 0 } ), + Vector( { 1, -1, 0, 0, 0, 0, 1, -1, 1, -1, 0, 0, 0, 0, 1, -1, 1, -1, 0, 0 } ), + Vector( { 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( 20, 0 ), + Vector( 20, 0 ), + Vector( { 1, -1, 1, -1 } ), + Vector( { 1, -1, 1, -1 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( 12, 0 ), + Vector( 12, 0 ), + Vector( { 1, -1, 1, -1 } ), + Vector( { 1, -1, 1, -1 } ) ); + + // Non-fixed activation neurons: x8 (SOFTMAX), x9 (SOFTMAX), x10 (SOFTMAX), x11 (SOFTMAX), + // x12 (SOFTMAX). + compareNonfixedNeurons( nlr, + Set( { NLR::NeuronIndex( 2, 0 ), + NLR::NeuronIndex( 2, 1 ), + NLR::NeuronIndex( 2, 2 ), + NLR::NeuronIndex( 2, 3 ), + NLR::NeuronIndex( 2, 4 ) } ) ); + } + + void test_parameterised_symbolic_bound_maps_bilinear() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTBilinear( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -2 ); + tableau.setUpperBound( 1, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); + + /* + Input ranges: + + x0: [1, 2] + x1: [-2, 1] + + Layers 1, 2: + + x2 = x0 - 2x1 + x2.lb = x0 - 2x1 : [-1, 6] + x2.ub = x0 - 2x1 : [-1, 6] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [-1, 3] + x3.ub = x0 + x1 : [-1, 3] + + Using custom coefficients with alpha = { 0.5, 0.5 }. + Coefficients for bilinear layer: + Lower bound: + alpha_l = 0.5 x3.lb + ( 1 - 0.5 ) x3.ub = 0.5 * -1 + 0.5 * 3 = 1 + beta_l = 0.5 x2.lb + ( 1 - 0.5 ) x2.ub = 0.5 * -1 + 0.5 * 6 = 2.5 + gamma_l = -0.5 x2.lb x3.lb - ( 1 - 0.5 ) x2.ub x3.ub = -0.5 * -1 * -1 - 0.5 * 6 * 3 = + -9.5. + + Upper bound: + alpha_l = 0.5 x3.ub + ( 1 - 0.5 ) x3.lb = 0.5 * -1 + 0.5 * 3 = 1 + beta_l = 0.5 x2.lb + ( 1 - 0.5 ) x2.ub = 0.5 * -1 + 0.5 * 6 = 2.5 + gamma_l = -0.5 x2.lb x3.ub - ( 1 - 0.5 ) x2.ub x3.lb = -0.5 * -1 * 6 - 0.5 * -1 * 3 + = 4.5. 
+ + S = { x2.lb x3.lb, x2.ub x3.lb, x2.lb x3.ub, x2.ub x3.ub } = { 1, -3, -6, 18 } + -6 <= min S <= x4 <= max S = 18 + x2 + 2.5 x3 - 9.5 <= x4 <= x2 + 2.5 x3 + 4.5 + x4.lb = 1 ( x0 - 2x1 ) + 2.5 ( x0 + x1 ) - 9.5 = 3.5 x0 + 0.5 x1 - 9.5 : [-7, -2] + x4.ub = 1 ( x0 - 2x1 ) + 2.5 ( x0 + x1 ) + 4.5 = 3.5 x0 + 0.5 x1 + 4.5 : [7, 12] + x4 range: [-6, 18] + + Layer 3: + + x5 = -x4 : [-18, 6] + => -x2 - 2.5 x3 - 4.5 <= x4 <= -x2 - 2.5 x3 + 9.5 + x5.lb = -1 ( 3.5 x0 + 0.5 x1 + 4.5 ) = -3.5 x0 - 0.5 x1 - 4.5 : [-12, 0] + x5.ub = -1 ( 3.5 x0 + 0.5 x1 - 9.5 ) = -3.5 x0 - 0.5 x1 + 9.5 : [2, 7] + x5 range: [-12, 6] + */ + + List expectedBounds( { Tightening( 2, -1, Tightening::LB ), + Tightening( 2, 6, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 3, Tightening::UB ), + Tightening( 4, -6, Tightening::LB ), + Tightening( 4, 18, Tightening::UB ), + Tightening( 5, -12, Tightening::LB ), + Tightening( 5, 6, Tightening::UB ) } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (BILINEAR): + x2 + 2.5 x3 - 9.5 <= x4 <= x2 + 2.5 x3 + 4.5 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x5 <= x5 <= x5 + + Layer 2: + Using x5 = -x4: + -x4 <= x5 <= -x4 + + Layer 1: + Using x2 + 2.5 x3 - 9.5 <= x4 <= x2 + 2.5 x3 + 4.5: + -x2 - 2.5 x3 - 4.5 <= x5 <= -x2 - 2.5 x3 + 9.5 + + Layer 0: + Using x2 = x0 - 2x1, x3 = x0 + x1: + -3.5 x0 - 0.5 x1 - 4.5 <= x5 <= -3.5 x0 - 0.5 x1 + 9.5 + */ + + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 1, 2.5 } ), + Vector( { 1, 2.5 } ), + Vector( { -9.5 } ), + Vector( { 4.5 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { -1 } ), + Vector( { -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { -1, -2.5 } ), + Vector( { -1, -2.5 } ), + Vector( { -4.5 } ), + Vector( { 9.5 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -3.5, -0.5 } ), + Vector( { -3.5, -0.5 } ), + Vector( { -4.5 } ), + Vector( { 9.5 } ) ); + + // Non-fixed activation neurons: x4 (BILINEAR). 
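+
+ /*
+ Double-check with Python (illustrative sketch only, not part of the test and not
+ the NetworkLevelReasoner implementation; the parameterised bilinear bounds above
+ can be read as the alpha = 0.5 blend of the two McCormick lower planes and the two
+ McCormick upper planes for x2 * x3 over [-1, 6] x [-1, 3], and sampling the box
+ confirms the planes bracket the product):
+ ---
+ x2l, x2u, x3l, x3u, a = -1.0, 6.0, -1.0, 3.0, 0.5
+ lower = (a * x3l + (1 - a) * x3u,               # coefficient on x2: 1
+          a * x2l + (1 - a) * x2u,               # coefficient on x3: 2.5
+          -a * x2l * x3l - (1 - a) * x2u * x3u)  # constant: -9.5
+ upper = (a * x3u + (1 - a) * x3l,               # 1
+          a * x2l + (1 - a) * x2u,               # 2.5
+          -a * x2l * x3u - (1 - a) * x2u * x3l)  # 4.5
+ print(lower, upper)
+ for i in range(61):
+     for j in range(61):
+         p = x2l + i * (x2u - x2l) / 60
+         q = x3l + j * (x3u - x3l) / 60
+         assert lower[0] * p + lower[1] * q + lower[2] <= p * q + 1e-9
+         assert p * q <= upper[0] * p + upper[1] * q + upper[2] + 1e-9
+ ---
+ */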
+ compareNonfixedNeurons( nlr, Set( { NLR::NeuronIndex( 2, 0 ) } ) ); + } + + bool boundsEqual( const List &bounds, const List &expectedBounds ) + { + if ( bounds.size() != expectedBounds.size() ) + return false; + + bool allFound = true; + for ( const auto &bound : bounds ) + { + bool currentFound = false; + for ( const auto &expectedBound : expectedBounds ) + { + currentFound |= + ( bound._type == expectedBound._type && + bound._variable == expectedBound._variable && + FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) ); + } + allFound &= currentFound; + } + return allFound; + } + + void updateTableau( MockTableau &tableau, List &tightenings ) + { + for ( const auto &tightening : tightenings ) + { + if ( tightening._type == Tightening::LB ) + { + tableau.setLowerBound( tightening._variable, tightening._value ); + } + + if ( tightening._type == Tightening::UB ) + { + tableau.setUpperBound( tightening._variable, tightening._value ); + } + } + } + + void compareOutputSymbolicBounds( NLR::NetworkLevelReasoner &nlr, + unsigned layerIndex, + const Vector &expectedSymbolicLb, + const Vector &expectedSymbolicUb, + const Vector &expectedSymbolicLowerBias, + const Vector &expectedSymbolicUpperBias ) + { + Vector outputSymbolicLb; + Vector outputSymbolicUb; + Vector outputSymbolicLowerBias; + Vector outputSymbolicUpperBias; + TS_ASSERT_THROWS_NOTHING( outputSymbolicLb = nlr.getOutputSymbolicLb( layerIndex ) ); + TS_ASSERT_THROWS_NOTHING( outputSymbolicUb = nlr.getOutputSymbolicUb( layerIndex ) ); + TS_ASSERT_THROWS_NOTHING( outputSymbolicLowerBias = + nlr.getOutputSymbolicLowerBias( layerIndex ) ); + TS_ASSERT_THROWS_NOTHING( outputSymbolicUpperBias = + nlr.getOutputSymbolicUpperBias( layerIndex ) ); + TS_ASSERT( compareVectors( outputSymbolicLb, expectedSymbolicLb ) ); + TS_ASSERT( compareVectors( outputSymbolicUb, expectedSymbolicUb ) ); + TS_ASSERT( compareVectors( outputSymbolicLowerBias, expectedSymbolicLowerBias ) ); + TS_ASSERT( compareVectors( outputSymbolicUpperBias, expectedSymbolicUpperBias ) ); + } + + void comparePredecessorSymbolicBounds( NLR::NetworkLevelReasoner &nlr, + unsigned layerIndex, + const Vector &expectedSymbolicLb, + const Vector &expectedSymbolicUb, + const Vector &expectedSymbolicLowerBias, + const Vector &expectedSymbolicUpperBias ) + { + Vector predecessorSymbolicLb; + Vector predecessorSymbolicUb; + Vector predecessorSymbolicLowerBias; + Vector predecessorSymbolicUpperBias; + TS_ASSERT_THROWS_NOTHING( predecessorSymbolicLb = + nlr.getPredecessorSymbolicLb( layerIndex ) ); + TS_ASSERT_THROWS_NOTHING( predecessorSymbolicUb = + nlr.getPredecessorSymbolicUb( layerIndex ) ); + TS_ASSERT_THROWS_NOTHING( predecessorSymbolicLowerBias = + nlr.getPredecessorSymbolicLowerBias( layerIndex ) ); + TS_ASSERT_THROWS_NOTHING( predecessorSymbolicUpperBias = + nlr.getPredecessorSymbolicUpperBias( layerIndex ) ); + TS_ASSERT( compareVectors( predecessorSymbolicLb, expectedSymbolicLb ) ); + TS_ASSERT( compareVectors( predecessorSymbolicUb, expectedSymbolicUb ) ); + TS_ASSERT( compareVectors( predecessorSymbolicLowerBias, expectedSymbolicLowerBias ) ); + TS_ASSERT( compareVectors( predecessorSymbolicUpperBias, expectedSymbolicUpperBias ) ); + } + + void compareBranchSymbolicBounds( NLR::NetworkLevelReasoner &nlr, + NLR::NeuronIndex index, + const Vector &expectedSymbolicLb, + const Vector &expectedSymbolicUb, + const Vector &expectedSymbolicLowerBias, + const Vector &expectedSymbolicUpperBias ) + { + Vector branchSymbolicLb; + Vector branchSymbolicUb; + Vector 
branchSymbolicLowerBias; + Vector branchSymbolicUpperBias; + TS_ASSERT_THROWS_NOTHING( branchSymbolicLb = nlr.getSymbolicLbPerBranch( index ) ); + TS_ASSERT_THROWS_NOTHING( branchSymbolicUb = nlr.getSymbolicUbPerBranch( index ) ); + TS_ASSERT_THROWS_NOTHING( branchSymbolicLowerBias = + nlr.getSymbolicLowerBiasPerBranch( index ) ); + TS_ASSERT_THROWS_NOTHING( branchSymbolicUpperBias = + nlr.getSymbolicUpperBiasPerBranch( index ) ); + TS_ASSERT( compareVectors( branchSymbolicLb, expectedSymbolicLb ) ); + TS_ASSERT( compareVectors( branchSymbolicUb, expectedSymbolicUb ) ); + TS_ASSERT( compareVectors( branchSymbolicLowerBias, expectedSymbolicLowerBias ) ); + TS_ASSERT( compareVectors( branchSymbolicUpperBias, expectedSymbolicUpperBias ) ); + } + + void compareNonfixedNeurons( NLR::NetworkLevelReasoner &nlr, + const Set &expectedIndices ) + { + Set indices; + for ( const auto &pair : nlr.getLayerIndexToLayer() ) + { + Vector nonfixedNeurons; + TS_ASSERT_THROWS_NOTHING( nonfixedNeurons = pair.second->getNonfixedNeurons() ); + for ( const auto index : nonfixedNeurons ) + { + indices.insert( index ); + } + } + + TS_ASSERT_EQUALS( indices.size(), expectedIndices.size() ); + for ( const auto index : indices ) + { + TS_ASSERT( expectedIndices.exists( index ) ); + } + } + + void + compareBBPSBranchingPoints( NLR::NetworkLevelReasoner &nlr, + NLR::NeuronIndex index, + const std::pair &expectedBranchingPoint ) + { + std::pair point; + TS_ASSERT_THROWS_NOTHING( point = nlr.getBBPSBranchingPoint( index ) ); + TS_ASSERT( FloatUtils::areEqual( + point.first._layer, expectedBranchingPoint.first._layer, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + point.first._neuron, expectedBranchingPoint.first._neuron, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( point.second, expectedBranchingPoint.second, 0.0001 ) ); + } + + void comparePMNRScores( NLR::NetworkLevelReasoner &nlr, + NLR::NeuronIndex index, + double expectedScore ) + { + double score = 0; + TS_ASSERT_THROWS_NOTHING( score = nlr.getPMNRScore( index ) ); + TS_ASSERT( FloatUtils::areEqual( score, expectedScore, 0.0001 ) ); + } + + bool compareVectors( const Vector &vectorA, const Vector &vectorB ) + { + if ( vectorA.size() != vectorB.size() ) + return false; + + for ( unsigned i = 0; i < vectorA.size(); ++i ) + { + if ( !FloatUtils::areEqual( vectorA[i], vectorB[i], 0.0001 ) ) + return false; + } + + return true; + } +};